Package gluon :: Module dal
[hide private]
[frames] | [no frames]

Source Code for Module gluon.dal

    1  #!/bin/env python 
    2  # -*- coding: utf-8 -*- 
    3   
    4  """ 
    5  This file is part of the web2py Web Framework 
    6  Copyrighted by Massimo Di Pierro <mdipierro@cs.depaul.edu> 
    7  License: LGPLv3 (http://www.gnu.org/licenses/lgpl.html) 
    8   
    9  Thanks to 
   10      * Niall Sweeny <niall.sweeny@fonjax.com> for MS SQL support 
   11      * Marcel Leuthi <mluethi@mlsystems.ch> for Oracle support 
   12      * Denes 
   13      * Chris Clark 
   14      * clach05 
   15      * Denes Lengyel 
   16      * and many others who have contributed to current and previous versions 
   17   
   18  This file contains the DAL support for many relational databases, 
   19  including: 
   20  - SQLite & SpatiaLite 
   21  - MySQL 
   22  - Postgres 
   23  - Firebird 
   24  - Oracle 
   25  - MS SQL 
   26  - DB2 
   27  - Interbase 
   28  - Ingres 
   29  - Informix (9+ and SE) 
   30  - SapDB (experimental) 
   31  - Cubrid (experimental) 
   32  - CouchDB (experimental) 
   33  - MongoDB (in progress) 
   34  - Google:nosql 
   35  - Google:sql 
   36  - Teradata 
   37  - IMAP (experimental) 
   38   
   39  Example of usage: 
   40   
   41  >>> # from dal import DAL, Field 
   42   
   43  ### create DAL connection (and create DB if it doesn't exist) 
   44  >>> db = DAL(('sqlite://storage.sqlite','mysql://a:b@localhost/x'), 
   45  ... folder=None) 
   46   
   47  ### define a table 'person' (create/alter as necessary) 
   48  >>> person = db.define_table('person',Field('name','string')) 
   49   
   50  ### insert a record 
   51  >>> id = person.insert(name='James') 
   52   
   53  ### retrieve it by id 
   54  >>> james = person(id) 
   55   
   56  ### retrieve it by name 
   57  >>> james = person(name='James') 
   58   
   59  ### retrieve it by arbitrary query 
   60  >>> query = (person.name=='James') & (person.name.startswith('J')) 
   61  >>> james = db(query).select(person.ALL)[0] 
   62   
   63  ### update one record 
   64  >>> james.update_record(name='Jim') 
   65  <Row {'id': 1, 'name': 'Jim'}> 
   66   
   67  ### update multiple records by query 
   68  >>> db(person.name.like('J%')).update(name='James') 
   69  1 
   70   
   71  ### delete records by query 
   72  >>> db(person.name.lower() == 'jim').delete() 
   73  0 
   74   
   75  ### retrieve multiple records (rows) 
   76  >>> people = db(person).select(orderby=person.name, 
   77  ... groupby=person.name, limitby=(0,100)) 
   78   
   79  ### further filter them 
   80  >>> james = people.find(lambda row: row.name == 'James').first() 
   81  >>> print james.id, james.name 
   82  1 James 
   83   
   84  ### check aggregates 
   85  >>> counter = person.id.count() 
   86  >>> print db(person).select(counter).first()(counter) 
   87  1 
   88   
   89  ### delete one record 
   90  >>> james.delete_record() 
   91  1 
   92   
   93  ### delete (drop) entire database table 
   94  >>> person.drop() 
   95   
   96  Supported field types: 
   97  id string text boolean integer double decimal password upload 
   98  blob time date datetime 
   99   
  100  Supported DAL URI strings: 
  101  'sqlite://test.db' 
  102  'spatialite://test.db' 
  103  'sqlite:memory' 
  104  'spatialite:memory' 
  105  'jdbc:sqlite://test.db' 
  106  'mysql://root:none@localhost/test' 
  107  'postgres://mdipierro:password@localhost/test' 
  108  'postgres:psycopg2://mdipierro:password@localhost/test' 
  109  'postgres:pg8000://mdipierro:password@localhost/test' 
  110  'jdbc:postgres://mdipierro:none@localhost/test' 
  111  'mssql://web2py:none@A64X2/web2py_test' 
  112  'mssql2://web2py:none@A64X2/web2py_test' # alternate mappings 
  113  'oracle://username:password@database' 
  114  'firebird://user:password@server:3050/database' 
  115  'db2://DSN=dsn;UID=user;PWD=pass' 
  116  'firebird://username:password@hostname/database' 
  117  'firebird_embedded://username:password@c://path' 
  118  'informix://user:password@server:3050/database' 
  119  'informixu://user:password@server:3050/database' # unicode informix 
  120  'ingres://database'  # or use an ODBC connection string, e.g. 'ingres://dsn=dsn_name' 
  121  'google:datastore' # for google app engine datastore 
  122  'google:sql' # for google app engine with sql (mysql compatible) 
  123  'teradata://DSN=dsn;UID=user;PWD=pass; DATABASE=database' # experimental 
  124  'imap://user:password@server:port' # experimental 
  125  'mongodb://user:password@server:port/database' # experimental 
  126   
  127  For more info: 
  128  help(DAL) 
  129  help(Field) 
  130  """ 
  131   
  132  ################################################################################### 
  133  # this file only exposes DAL and Field 
  134  ################################################################################### 
  135   
__all__ = ['DAL', 'Field']

MAXCHARLENGTH = 2**15 # not quite but reasonable default max char length
# default Field lengths per field type, used when no length is given
DEFAULTLENGTH = {'string':512,
                 'password':512,
                 'upload':512,
                 'text':2**15,
                 'blob':2**31}
# number of timing records kept -- NOTE(review): usage not visible in this chunk, confirm
TIMINGSSIZE = 100
# per-platform shared-library names for the SpatiaLite extension
SPATIALLIBS = {
    'Windows':'libspatialite',
    'Linux':'libspatialite.so',
    'Darwin':'libspatialite.dylib'
    }
# connection string used when no URI is supplied
DEFAULT_URI = 'sqlite://dummy.db'
  151   
  152  import re 
  153  import sys 
  154  import locale 
  155  import os 
  156  import types 
  157  import datetime 
  158  import threading 
  159  import time 
  160  import csv 
  161  import cgi 
  162  import copy 
  163  import socket 
  164  import logging 
  165  import base64 
  166  import shutil 
  167  import marshal 
  168  import decimal 
  169  import struct 
  170  import urllib 
  171  import hashlib 
  172  import uuid 
  173  import glob 
  174  import traceback 
  175  import platform 
  176   
PYTHON_VERSION = sys.version_info[0]
if PYTHON_VERSION == 2:
    # Python 2: prefer the C-accelerated modules, aliased to their py3 names
    import cPickle as pickle
    import cStringIO as StringIO
    import copy_reg as copyreg
    hashlib_md5 = hashlib.md5
    bytes, unicode = str, unicode
else:
    # Python 3: stdlib names already match; add py2-style aliases
    import pickle
    from io import StringIO as StringIO
    import copyreg
    long = int
    # hashlib.md5 requires bytes on py3, so encode str input as utf8 first
    hashlib_md5 = lambda s: hashlib.md5(bytes(s,'utf8'))
    bytes, unicode = bytes, str
  191   
# object types the DAL recognizes as callables
# NOTE(review): exact consumers of CALLABLETYPES are outside this chunk -- confirm
CALLABLETYPES = (types.LambdaType, types.FunctionType,
                 types.BuiltinFunctionType,
                 types.MethodType, types.BuiltinMethodType)

# keyword arguments recognized by define_table() -- presumably used to
# validate/partition kwargs; confirm at the call site
TABLE_ARGS = set(
    ('migrate','primarykey','fake_migrate','format','redefine',
     'singular','plural','trigger_name','sequence_name',
     'common_filter','polymodel','table_class','on_define',))

# keyword arguments recognized by select()
SELECT_ARGS = set(
    ('orderby', 'groupby', 'limitby','required', 'cache', 'left',
     'distinct', 'having', 'join','for_update', 'processor','cacheable', 'orderby_on_limitby'))

# short aliases used throughout this module
ogetattr = object.__getattribute__
osetattr = object.__setattr__
exists = os.path.exists
pjoin = os.path.join
  209   
  210  ################################################################################### 
  211  # following checks allow the use of dal without web2py, as a standalone module 
  212  ################################################################################### 
try:
    from utils import web2py_uuid
except (ImportError, SystemError):
    import uuid
    # standalone fallback (dal used without web2py): plain random UUID4 string
    def web2py_uuid(): return str(uuid.uuid4())
218 219 try: 220 import portalocker 221 have_portalocker = True 222 except ImportError: 223 have_portalocker = False 224 225 try: 226 import serializers 227 have_serializers = True 228 except ImportError: 229 have_serializers = False 230 try: 231 import json as simplejson 232 except ImportError: 233 try: 234 import gluon.contrib.simplejson as simplejson 235 except ImportError: 236 simplejson = None 237 238 try: 239 import validators 240 have_validators = True 241 except (ImportError, SyntaxError): 242 have_validators = False 243 244 LOGGER = logging.getLogger("web2py.dal") 245 DEFAULT = lambda:0 246 247 GLOBAL_LOCKER = threading.RLock() 248 THREAD_LOCAL = threading.local() 249 250 # internal representation of tables with field 251 # <table>.<field>, tables and fields may only be [a-zA-Z0-9_] 252 253 REGEX_TYPE = re.compile('^([\w\_\:]+)') 254 REGEX_DBNAME = re.compile('^(\w+)(\:\w+)*') 255 REGEX_W = re.compile('^\w+$') 256 REGEX_TABLE_DOT_FIELD = re.compile('^(\w+)\.(\w+)$') 257 REGEX_UPLOAD_PATTERN = re.compile('(?P<table>[\w\-]+)\.(?P<field>[\w\-]+)\.(?P<uuidkey>[\w\-]+)\.(?P<name>\w+)\.\w+$') 258 REGEX_CLEANUP_FN = re.compile('[\'"\s;]+') 259 REGEX_UNPACK = re.compile('(?<!\|)\|(?!\|)') 260 REGEX_PYTHON_KEYWORDS = re.compile('^(and|del|from|not|while|as|elif|global|or|with|assert|else|if|pass|yield|break|except|import|print|class|exec|in|raise|continue|finally|is|return|def|for|lambda|try)$') 261 REGEX_SELECT_AS_PARSER = re.compile("\s+AS\s+(\S+)") 262 REGEX_CONST_STRING = re.compile('(\"[^\"]*?\")|(\'[^\']*?\')') 263 REGEX_SEARCH_PATTERN = re.compile('^{[^\.]+\.[^\.]+(\.(lt|gt|le|ge|eq|ne|contains|startswith|year|month|day|hour|minute|second))?(\.not)?}$') 264 REGEX_SQUARE_BRACKETS = re.compile('^.+\[.+\]$') 265 REGEX_STORE_PATTERN = re.compile('\.(?P<e>\w{1,5})$') 266 REGEX_QUOTES = re.compile("'[^']*'") 267 REGEX_ALPHANUMERIC = re.compile('^[0-9a-zA-Z]\w*$') 268 REGEX_PASSWORD = re.compile('\://([^:@]*)\:') 269 REGEX_NOPASSWD = 
re.compile('(?<=\:)([^:@/]+)(?=@.+)') 270 271 # list of drivers will be built on the fly 272 # and lists only what is available 273 DRIVERS = [] 274 275 try: 276 from new import classobj 277 from google.appengine.ext import db as gae 278 from google.appengine.api import namespace_manager, rdbms 279 from google.appengine.api.datastore_types import Key ### for belongs on ID 280 from google.appengine.ext.db.polymodel import PolyModel 281 DRIVERS.append('google') 282 except ImportError: 283 pass 284 285 if not 'google' in DRIVERS: 286 287 try: 288 from pysqlite2 import dbapi2 as sqlite2 289 DRIVERS.append('SQLite(sqlite2)') 290 except ImportError: 291 LOGGER.debug('no SQLite drivers pysqlite2.dbapi2') 292 293 try: 294 from sqlite3 import dbapi2 as sqlite3 295 DRIVERS.append('SQLite(sqlite3)') 296 except ImportError: 297 LOGGER.debug('no SQLite drivers sqlite3') 298 299 try: 300 # first try contrib driver, then from site-packages (if installed) 301 try: 302 import contrib.pymysql as pymysql 303 # monkeypatch pymysql because they havent fixed the bug: 304 # https://github.com/petehunt/PyMySQL/issues/86 305 pymysql.ESCAPE_REGEX = re.compile("'") 306 pymysql.ESCAPE_MAP = {"'": "''"} 307 # end monkeypatch 308 except ImportError: 309 import pymysql 310 DRIVERS.append('MySQL(pymysql)') 311 except ImportError: 312 LOGGER.debug('no MySQL driver pymysql') 313 314 try: 315 import MySQLdb 316 DRIVERS.append('MySQL(MySQLdb)') 317 except ImportError: 318 LOGGER.debug('no MySQL driver MySQLDB') 319 320 321 try: 322 import psycopg2 323 from psycopg2.extensions import adapt as psycopg2_adapt 324 DRIVERS.append('PostgreSQL(psycopg2)') 325 except ImportError: 326 LOGGER.debug('no PostgreSQL driver psycopg2') 327 328 try: 329 # first try contrib driver, then from site-packages (if installed) 330 try: 331 import contrib.pg8000.dbapi as pg8000 332 except ImportError: 333 import pg8000.dbapi as pg8000 334 DRIVERS.append('PostgreSQL(pg8000)') 335 except ImportError: 336 LOGGER.debug('no 
PostgreSQL driver pg8000') 337 338 try: 339 import cx_Oracle 340 DRIVERS.append('Oracle(cx_Oracle)') 341 except ImportError: 342 LOGGER.debug('no Oracle driver cx_Oracle') 343 344 try: 345 try: 346 import pyodbc 347 except ImportError: 348 try: 349 import contrib.pypyodbc as pyodbc 350 except Exception, e: 351 raise ImportError(str(e)) 352 DRIVERS.append('MSSQL(pyodbc)') 353 DRIVERS.append('DB2(pyodbc)') 354 DRIVERS.append('Teradata(pyodbc)') 355 DRIVERS.append('Ingres(pyodbc)') 356 except ImportError: 357 LOGGER.debug('no MSSQL/DB2/Teradata/Ingres driver pyodbc') 358 359 try: 360 import Sybase 361 DRIVERS.append('Sybase(Sybase)') 362 except ImportError: 363 LOGGER.debug('no Sybase driver') 364 365 try: 366 import kinterbasdb 367 DRIVERS.append('Interbase(kinterbasdb)') 368 DRIVERS.append('Firebird(kinterbasdb)') 369 except ImportError: 370 LOGGER.debug('no Firebird/Interbase driver kinterbasdb') 371 372 try: 373 import fdb 374 DRIVERS.append('Firebird(fdb)') 375 except ImportError: 376 LOGGER.debug('no Firebird driver fdb') 377 ##### 378 try: 379 import firebirdsql 380 DRIVERS.append('Firebird(firebirdsql)') 381 except ImportError: 382 LOGGER.debug('no Firebird driver firebirdsql') 383 384 try: 385 import informixdb 386 DRIVERS.append('Informix(informixdb)') 387 LOGGER.warning('Informix support is experimental') 388 except ImportError: 389 LOGGER.debug('no Informix driver informixdb') 390 391 try: 392 import sapdb 393 DRIVERS.append('SQL(sapdb)') 394 LOGGER.warning('SAPDB support is experimental') 395 except ImportError: 396 LOGGER.debug('no SAP driver sapdb') 397 398 try: 399 import cubriddb 400 DRIVERS.append('Cubrid(cubriddb)') 401 LOGGER.warning('Cubrid support is experimental') 402 except ImportError: 403 LOGGER.debug('no Cubrid driver cubriddb') 404 405 try: 406 from com.ziclix.python.sql import zxJDBC 407 import java.sql 408 # Try sqlite jdbc driver from http://www.zentus.com/sqlitejdbc/ 409 from org.sqlite import JDBC # required by java.sql; ensure we have 
it 410 zxJDBC_sqlite = java.sql.DriverManager 411 DRIVERS.append('PostgreSQL(zxJDBC)') 412 DRIVERS.append('SQLite(zxJDBC)') 413 LOGGER.warning('zxJDBC support is experimental') 414 is_jdbc = True 415 except ImportError: 416 LOGGER.debug('no SQLite/PostgreSQL driver zxJDBC') 417 is_jdbc = False 418 419 try: 420 import couchdb 421 DRIVERS.append('CouchDB(couchdb)') 422 except ImportError: 423 LOGGER.debug('no Couchdb driver couchdb') 424 425 try: 426 import pymongo 427 DRIVERS.append('MongoDB(pymongo)') 428 except: 429 LOGGER.debug('no MongoDB driver pymongo') 430 431 try: 432 import imaplib 433 DRIVERS.append('IMAP(imaplib)') 434 except: 435 LOGGER.debug('no IMAP driver imaplib') 436 437 PLURALIZE_RULES = [ 438 (re.compile('child$'), re.compile('child$'), 'children'), 439 (re.compile('oot$'), re.compile('oot$'), 'eet'), 440 (re.compile('ooth$'), re.compile('ooth$'), 'eeth'), 441 (re.compile('l[eo]af$'), re.compile('l([eo])af$'), 'l\\1aves'), 442 (re.compile('sis$'), re.compile('sis$'), 'ses'), 443 (re.compile('man$'), re.compile('man$'), 'men'), 444 (re.compile('ife$'), re.compile('ife$'), 'ives'), 445 (re.compile('eau$'), re.compile('eau$'), 'eaux'), 446 (re.compile('lf$'), re.compile('lf$'), 'lves'), 447 (re.compile('[sxz]$'), re.compile('$'), 'es'), 448 (re.compile('[^aeioudgkprt]h$'), re.compile('$'), 'es'), 449 (re.compile('(qu|[^aeiou])y$'), re.compile('y$'), 'ies'), 450 (re.compile('$'), re.compile('$'), 's'), 451 ]
def pluralize(singular, rules=PLURALIZE_RULES):
    """Return the plural of *singular* using the first matching rule.

    Each rule is a ``(search_regex, sub_regex, replacement)`` triple.  The
    default PLURALIZE_RULES table ends with a catch-all that appends 's',
    so a result is normally always produced; with a custom *rules* table
    that matches nothing, None is returned implicitly.
    """
    for matcher, substituter, replacement in rules:
        if not matcher.search(singular):
            continue
        plural_form = substituter.sub(replacement, singular)
        if plural_form:
            return plural_form
458
def hide_password(uri):
    """Return *uri* with its password component masked as '******'."""
    masked = REGEX_NOPASSWD.sub('******', uri)
    return masked
461
def OR(a, b):
    """Combine two operands with the ``|`` operator.

    Presumably used to OR two DAL Query objects -- works on any
    operands supporting ``|``.
    """
    combined = a | b
    return combined
464
def AND(a, b):
    """Combine two operands with the ``&`` operator.

    Presumably used to AND two DAL Query objects -- works on any
    operands supporting ``&``.
    """
    combined = a & b
    return combined
467
def IDENTITY(x):
    """Return the argument unchanged (no-op default, e.g. for
    ``credential_decoder`` in BaseAdapter.__init__)."""
    return x
def varquote_aux(name, quotestr='%s'):
    """Quote *name* with the *quotestr* template unless it is a plain
    identifier (only word characters, per REGEX_W)."""
    if REGEX_W.match(name):
        return name
    return quotestr % name
if 'google' in DRIVERS:

    # on the Google platform there is no JDBC; the zxJDBC probe that can
    # set this True is skipped, so default the flag off here
    is_jdbc = False
476 477 - class GAEDecimalProperty(gae.Property):
478 """ 479 GAE decimal implementation 480 """ 481 data_type = decimal.Decimal 482
483 - def __init__(self, precision, scale, **kwargs):
484 super(GAEDecimalProperty, self).__init__(self, **kwargs) 485 d = '1.' 486 for x in range(scale): 487 d += '0' 488 self.round = decimal.Decimal(d)
489
490 - def get_value_for_datastore(self, model_instance):
491 value = super(GAEDecimalProperty, self)\ 492 .get_value_for_datastore(model_instance) 493 if value is None or value == '': 494 return None 495 else: 496 return str(value)
497
498 - def make_value_from_datastore(self, value):
499 if value is None or value == '': 500 return None 501 else: 502 return decimal.Decimal(value).quantize(self.round)
503
504 - def validate(self, value):
505 value = super(GAEDecimalProperty, self).validate(value) 506 if value is None or isinstance(value, decimal.Decimal): 507 return value 508 elif isinstance(value, basestring): 509 return decimal.Decimal(value) 510 raise gae.BadValueError("Property %s must be a Decimal or string."\ 511 % self.name)
512
###################################################################################
# class that handles connection pooling (all adapters are derived from this one)
###################################################################################

class ConnectionPool(object):

    # per-URI lists of idle, reusable DB-API connections (shared process-wide)
    POOLS = {}
    # when True, a pooled connection is probed with 'SELECT 1;' before reuse
    check_active_connection = True

    @staticmethod
    def set_folder(folder):
        """Record the working folder for the current thread."""
        THREAD_LOCAL.folder = folder

    # ## this allows gluon to commit/rollback all dbs in this thread

    def close(self,action='commit',really=True):
        """Finish work on this connection and release it.

        `action` is applied first: a callable receives the adapter, a string
        names a method to call on it (e.g. 'commit' or 'rollback').  When
        pooling is enabled and the pool has room, the connection is recycled
        instead of being closed (`really` is forced False).
        """
        if action:
            if callable(action):
                action(self)
            else:
                getattr(self, action)()
        # ## if you want pools, recycle this connection
        if self.pool_size:
            GLOBAL_LOCKER.acquire()
            pool = ConnectionPool.POOLS[self.uri]
            if len(pool) < self.pool_size:
                pool.append(self.connection)
                really = False
            GLOBAL_LOCKER.release()
        if really:
            self.close_connection()
        # the adapter no longer owns a connection either way
        self.connection = None

    @staticmethod
    def close_all_instances(action):
        """ to close cleanly databases in a multithreaded environment """
        dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
        for db_uid, db_group in dbs:
            for db in db_group:
                if hasattr(db,'_adapter'):
                    db._adapter.close(action)
        getattr(THREAD_LOCAL,'db_instances',{}).clear()
        getattr(THREAD_LOCAL,'db_instances_zombie',{}).clear()
        if callable(action):
            action(None)
        return

    def find_or_make_work_folder(self):
        """ this actually does not make the folder. it has to be there """
        self.folder = getattr(THREAD_LOCAL,'folder','')

        # Creating the folder if it does not exist
        # (the `False and` guard makes this branch unreachable)
        if False and self.folder and not exists(self.folder):
            os.mkdir(self.folder)

    def after_connection_hook(self):
        """hook for the after_connection parameter"""
        if callable(self._after_connection):
            self._after_connection(self)
        self.after_connection()

    def after_connection(self):
        """ this it is supposed to be overloaded by adapters"""
        pass

    def reconnect(self, f=None, cursor=True):
        """
        this function defines: self.connection and self.cursor
        (iff cursor is True)
        if self.pool_size>0 it will try pull the connection from the pool
        if the connection is not active (closed by db server) it will loop
        if not self.pool_size or no active connections in pool makes a new one
        """
        if getattr(self,'connection',None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            uri = self.uri
            POOLS = ConnectionPool.POOLS
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection, verifying it is still alive
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    try:
                        if self.cursor and self.check_active_connection:
                            self.execute('SELECT 1;')
                        break
                    except:
                        # stale connection: discard it and try the next one
                        pass
                else:
                    # pool empty: open a brand new connection
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
###################################################################################
# this is a generic adapter that does nothing; all others are derived from this one
###################################################################################

class BaseAdapter(ConnectionPool):
    # capability/configuration flags; subclasses override as needed
    native_json = False
    driver = None
    driver_name = None
    drivers = () # list of drivers from which to pick
    connection = None
    maxcharlength = MAXCHARLENGTH
    commit_on_alter_table = False
    support_distributed_transaction = False
    uploads_in_blob = False
    can_select_for_update = True

    # literal spellings used when rendering boolean values;
    # T_SEP presumably separates date and time parts -- confirm at call sites
    TRUE = 'T'
    FALSE = 'F'
    T_SEP = ' '
    # map of DAL field types to this engine's column-type templates
    types = {
        'boolean': 'CHAR(1)',
        'string': 'CHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'CHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'INTEGER',
        'float':'DOUBLE',
        'double': 'DOUBLE',
        'decimal': 'DOUBLE',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY AUTOINCREMENT',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        # the two below are only used when DAL(...bigint_id=True) and replace 'id','reference'
        'big-id': 'BIGINT PRIMARY KEY AUTOINCREMENT',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }
664 - def id_query(self, table):
665 return table._id != None
666
667 - def adapt(self, obj):
668 return "'%s'" % obj.replace("'", "''")
669
670 - def smart_adapt(self, obj):
671 if isinstance(obj,(int,float)): 672 return str(obj) 673 return self.adapt(str(obj))
674
675 - def integrity_error(self):
676 return self.driver.IntegrityError
677
678 - def operational_error(self):
679 return self.driver.OperationalError
680
681 - def file_exists(self, filename):
682 """ 683 to be used ONLY for files that on GAE may not be on filesystem 684 """ 685 return exists(filename)
686
687 - def file_open(self, filename, mode='rb', lock=True):
688 """ 689 to be used ONLY for files that on GAE may not be on filesystem 690 """ 691 if have_portalocker and lock: 692 fileobj = portalocker.LockedFile(filename,mode) 693 else: 694 fileobj = open(filename,mode) 695 return fileobj
696
697 - def file_close(self, fileobj):
698 """ 699 to be used ONLY for files that on GAE may not be on filesystem 700 """ 701 if fileobj: 702 fileobj.close()
703
704 - def file_delete(self, filename):
705 os.unlink(filename)
706
707 - def find_driver(self,adapter_args,uri=None):
708 if getattr(self,'driver',None) != None: 709 return 710 drivers_available = [driver for driver in self.drivers 711 if driver in globals()] 712 if uri: 713 items = uri.split('://',1)[0].split(':') 714 request_driver = items[1] if len(items)>1 else None 715 else: 716 request_driver = None 717 request_driver = request_driver or adapter_args.get('driver') 718 if request_driver: 719 if request_driver in drivers_available: 720 self.driver_name = request_driver 721 self.driver = globals().get(request_driver) 722 else: 723 raise RuntimeError("driver %s not available" % request_driver) 724 elif drivers_available: 725 self.driver_name = drivers_available[0] 726 self.driver = globals().get(self.driver_name) 727 else: 728 raise RuntimeError("no driver available %s" % str(self.drivers))
729 730
    def __init__(self, db,uri,pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={},do_connect=True, after_connection=None):
        """Base constructor: record connection settings on the instance.

        `credential_decoder`, `driver_args`, `adapter_args` and `do_connect`
        are accepted but unused here (presumably consumed by subclass
        constructors -- confirm).  No real connection is made: connection
        and cursor are stand-ins that swallow every call.
        """
        self.db = db
        self.dbengine = "None"
        self.uri = uri
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        class Dummy(object):
            # inert connection/cursor: any method returns an empty list
            lastrowid = 1
            def __getattr__(self, value):
                return lambda *a, **b: []
        self.connection = Dummy()
        self.cursor = Dummy()
748 - def sequence_name(self,tablename):
749 return '%s_sequence' % tablename
750
751 - def trigger_name(self,tablename):
752 return '%s_sequence' % tablename
753
754 - def varquote(self,name):
755 return name
756
    def create_table(self, table,
                     migrate=True,
                     fake_migrate=False,
                     polymodel=None):
        """Build (and normally execute) the CREATE TABLE statement for *table*.

        Returns the generated SQL string.  With `migrate` enabled it also
        maintains the per-table migration metadata file and sql.log, and
        delegates to migrate_table() when the stored field definitions differ
        from the current ones.  `fake_migrate` records the migration without
        touching the database.  `polymodel` is accepted for adapter-specific
        use and is unused here.
        """
        db = table._db
        fields = []
        # PostGIS geo fields are added after the table has been created
        postcreation_fields = []
        # sql_fields drives migration detection; sql_fields_aux holds the
        # actual DDL (they differ only in rendered default values, see below)
        sql_fields = {}
        sql_fields_aux = {}
        TFK = {}  # table-level (multi-column) foreign keys, keyed by table
        tablename = table._tablename
        sortable = 0
        types = self.types
        for field in table:
            sortable += 1
            field_name = field.name
            field_type = field.type
            if isinstance(field_type,SQLCustomType):
                ftype = field_type.native or field_type.type
            elif field_type.startswith('reference'):
                referenced = field_type[10:].strip()
                if referenced == '.':
                    referenced = tablename
                constraint_name = self.constraint_name(tablename, field_name)
                if not '.' in referenced \
                        and referenced != tablename \
                        and hasattr(table,'_primarykey'):
                    ftype = types['integer']
                else:
                    if hasattr(table,'_primarykey'):
                        rtablename,rfieldname = referenced.split('.')
                        rtable = db[rtablename]
                        rfield = rtable[rfieldname]
                        # must be PK reference or unique
                        if rfieldname in rtable._primarykey or \
                                rfield.unique:
                            ftype = types[rfield.type[:9]] % \
                                dict(length=rfield.length)
                            # multicolumn primary key reference?
                            if not rfield.unique and len(rtable._primarykey)>1:
                                # then it has to be a table level FK
                                if rtablename not in TFK:
                                    TFK[rtablename] = {}
                                TFK[rtablename][rfieldname] = field_name
                            else:
                                ftype = ftype + \
                                    types['reference FK'] % dict(
                                    constraint_name = constraint_name, # should be quoted
                                    foreign_key = '%s (%s)' % (rtablename,
                                                               rfieldname),
                                    table_name = tablename,
                                    field_name = field_name,
                                    on_delete_action=field.ondelete)
                    else:
                        # make a guess here for circular references
                        if referenced in db:
                            id_fieldname = db[referenced]._id.name
                        elif referenced == tablename:
                            id_fieldname = table._id.name
                        else: #make a guess
                            id_fieldname = 'id'
                        ftype = types[field_type[:9]] % dict(
                            index_name = field_name+'__idx',
                            field_name = field_name,
                            constraint_name = constraint_name,
                            foreign_key = '%s (%s)' % (referenced,
                                                       id_fieldname),
                            on_delete_action=field.ondelete)
            elif field_type.startswith('list:reference'):
                ftype = types[field_type[:14]]
            elif field_type.startswith('decimal'):
                precision, scale = map(int,field_type[8:-1].split(','))
                ftype = types[field_type[:7]] % \
                    dict(precision=precision,scale=scale)
            elif field_type.startswith('geo'):
                if not hasattr(self,'srid'):
                    raise RuntimeError('Adapter does not support geometry')
                srid = self.srid
                geotype, parms = field_type[:-1].split('(')
                if not geotype in types:
                    raise SyntaxError(
                        'Field: unknown field type: %s for %s' \
                        % (field_type, field_name))
                ftype = types[geotype]
                if self.dbengine == 'postgres' and geotype == 'geometry':
                    # parameters: schema, srid, dimension
                    dimension = 2 # GIS.dimension ???
                    parms = parms.split(',')
                    if len(parms) == 3:
                        schema, srid, dimension = parms
                    elif len(parms) == 2:
                        schema, srid = parms
                    else:
                        schema = parms[0]
                    ftype = "SELECT AddGeometryColumn ('%%(schema)s', '%%(tablename)s', '%%(fieldname)s', %%(srid)s, '%s', %%(dimension)s);" % types[geotype]
                    ftype = ftype % dict(schema=schema,
                                         tablename=tablename,
                                         fieldname=field_name, srid=srid,
                                         dimension=dimension)
                    postcreation_fields.append(ftype)
            elif not field_type in types:
                raise SyntaxError('Field: unknown field type: %s for %s' % \
                    (field_type, field_name))
            else:
                ftype = types[field_type]\
                    % dict(length=field.length)
            if not field_type.startswith('id') and \
                    not field_type.startswith('reference'):
                if field.notnull:
                    ftype += ' NOT NULL'
                else:
                    ftype += self.ALLOW_NULL()
                if field.unique:
                    ftype += ' UNIQUE'
                if field.custom_qualifier:
                    ftype += ' %s' % field.custom_qualifier

            # add to list of fields
            sql_fields[field_name] = dict(
                length=field.length,
                unique=field.unique,
                notnull=field.notnull,
                sortable=sortable,
                type=str(field_type),
                sql=ftype)

            if field.notnull and not field.default is None:
                # Caveat: sql_fields and sql_fields_aux
                # differ for default values.
                # sql_fields is used to trigger migrations and sql_fields_aux
                # is used for create tables.
                # The reason is that we do not want to trigger
                # a migration simply because a default value changes.
                not_null = self.NOT_NULL(field.default, field_type)
                ftype = ftype.replace('NOT NULL', not_null)
            sql_fields_aux[field_name] = dict(sql=ftype)
            # Postgres - PostGIS:
            # geometry fields are added after the table has been created, not now
            if not (self.dbengine == 'postgres' and \
                    field_type.startswith('geom')):
                fields.append('%s %s' % (field_name, ftype))
        other = ';'

        # backend-specific extensions to fields
        if self.dbengine == 'mysql':
            if not hasattr(table, "_primarykey"):
                fields.append('PRIMARY KEY(%s)' % table._id.name)
            other = ' ENGINE=InnoDB CHARACTER SET utf8;'

        fields = ',\n    '.join(fields)
        # render accumulated table-level (composite) foreign keys
        for rtablename in TFK:
            rfields = TFK[rtablename]
            pkeys = db[rtablename]._primarykey
            fkeys = [ rfields[k] for k in pkeys ]
            fields = fields + ',\n    ' + \
                types['reference TFK'] % dict(
                table_name = tablename,
                field_name=', '.join(fkeys),
                foreign_table = rtablename,
                foreign_key = ', '.join(pkeys),
                on_delete_action = field.ondelete)

        if hasattr(table,'_primarykey'):
            query = "CREATE TABLE %s(\n    %s,\n    %s) %s" % \
                (tablename, fields,
                 self.PRIMARY_KEY(', '.join(table._primarykey)),other)
        else:
            query = "CREATE TABLE %s(\n    %s\n)%s" % \
                (tablename, fields, other)

        # migration metadata lives next to a file-based sqlite db,
        # otherwise in the adapter's working folder
        if self.uri.startswith('sqlite:///') \
                or self.uri.startswith('spatialite:///'):
            path_encoding = sys.getfilesystemencoding() \
                or locale.getdefaultlocale()[1] or 'utf8'
            dbpath = self.uri[9:self.uri.rfind('/')]\
                .decode('utf8').encode(path_encoding)
        else:
            dbpath = self.folder

        if not migrate:
            return query
        elif self.uri.startswith('sqlite:memory')\
                or self.uri.startswith('spatialite:memory'):
            table._dbt = None
        elif isinstance(migrate, str):
            table._dbt = pjoin(dbpath, migrate)
        else:
            table._dbt = pjoin(
                dbpath, '%s_%s.table' % (table._db._uri_hash, tablename))

        if table._dbt:
            table._loggername = pjoin(dbpath, 'sql.log')
            logfile = self.file_open(table._loggername, 'a')
        else:
            logfile = None
        if not table._dbt or not self.file_exists(table._dbt):
            # first time this table is seen: create it (unless faking)
            if table._dbt:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                logfile.write(query + '\n')
            if not fake_migrate:
                self.create_sequence_and_triggers(query,table)
                table._db.commit()
                # Postgres geom fields are added now,
                # after the table has been created
                for query in postcreation_fields:
                    self.execute(query)
                    table._db.commit()
            if table._dbt:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields, tfile)
                self.file_close(tfile)
                if fake_migrate:
                    logfile.write('faked!\n')
                else:
                    logfile.write('success!\n')
        else:
            # table metadata exists: compare and migrate if it changed
            tfile = self.file_open(table._dbt, 'r')
            try:
                sql_fields_old = pickle.load(tfile)
            except EOFError:
                self.file_close(tfile)
                self.file_close(logfile)
                raise RuntimeError('File %s appears corrupted' % table._dbt)
            self.file_close(tfile)
            if sql_fields != sql_fields_old:
                self.migrate_table(table,
                                   sql_fields, sql_fields_old,
                                   sql_fields_aux, logfile,
                                   fake_migrate=fake_migrate)
        self.file_close(logfile)
        return query
990
    def migrate_table(
        self,
        table,
        sql_fields,
        sql_fields_old,
        sql_fields_aux,
        logfile,
        fake_migrate=False,
        ):
        """
        Reconcile the live database table with the current model definition.

        Compares *sql_fields* (from the model) with *sql_fields_old* (the
        pickled metadata written by the previous run) and issues the ALTER
        TABLE statements needed to add, drop or retype columns.

        :param table: Table object being migrated
        :param sql_fields: dict fieldname -> dict(type=..., sql=...) for the model
        :param sql_fields_old: same mapping recorded by the last migration
        :param sql_fields_aux: like sql_fields, with adapter-specific SQL used
            to build ALTER statements
        :param logfile: open file; every attempted statement is logged to it
        :param fake_migrate: when True only log and rewrite metadata, execute
            nothing
        """
        db = table._db
        db._migrated.append(table._tablename)
        tablename = table._tablename
        def fix(item):
            # normalize legacy metadata entries (plain SQL strings) into dicts
            k,v=item
            if not isinstance(v,dict):
                v=dict(type='unkown',sql=v)
            return k.lower(),v
        # make sure all field names are lower case to avoid
        # migrations because of case change
        sql_fields = dict(map(fix,sql_fields.iteritems()))
        sql_fields_old = dict(map(fix,sql_fields_old.iteritems()))
        sql_fields_aux = dict(map(fix,sql_fields_aux.iteritems()))
        if db._debug:
            logging.debug('migrating %s to %s' % (sql_fields_old,sql_fields))

        # union of current and previous field names, preserving current order
        keys = sql_fields.keys()
        for key in sql_fields_old:
            if not key in keys:
                keys.append(key)
        new_add = self.concat_add(tablename)

        metadata_change = False
        sql_fields_current = copy.copy(sql_fields_old)
        for key in keys:
            query = None
            if not key in sql_fields_old:
                # new field: ADD COLUMN (PostGIS geometry uses its own SQL)
                sql_fields_current[key] = sql_fields[key]
                if self.dbengine in ('postgres',) and \
                   sql_fields[key]['type'].startswith('geometry'):
                    # 'sql' == ftype in sql
                    query = [ sql_fields[key]['sql'] ]
                else:
                    query = ['ALTER TABLE %s ADD %s %s;' % \
                             (tablename, key,
                              sql_fields_aux[key]['sql'].replace(', ', new_add))]
                metadata_change = True
            elif self.dbengine in ('sqlite', 'spatialite'):
                # sqlite cannot alter columns: only update the metadata
                if key in sql_fields:
                    sql_fields_current[key] = sql_fields[key]
                metadata_change = True
            elif not key in sql_fields:
                # field was removed from the model: DROP COLUMN
                del sql_fields_current[key]
                ftype = sql_fields_old[key]['type']
                if self.dbengine in ('postgres',) \
                        and ftype.startswith('geometry'):
                    geotype, parms = ftype[:-1].split('(')
                    schema = parms.split(',')[0]
                    query = [ "SELECT DropGeometryColumn ('%(schema)s', '%(table)s', '%(field)s');" % dict(schema=schema, table=tablename, field=key,) ]
                elif not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s DROP COLUMN %s;'
                             % (tablename, key)]
                else:
                    query = ['ALTER TABLE %s DROP %s;' % (tablename, key)]
                metadata_change = True
            elif sql_fields[key]['sql'] != sql_fields_old[key]['sql'] \
                  and not (key in table.fields and
                           isinstance(table[key].type, SQLCustomType)) \
                  and not sql_fields[key]['type'].startswith('reference')\
                  and not sql_fields[key]['type'].startswith('double')\
                  and not sql_fields[key]['type'].startswith('id'):
                # type changed: copy data through a __tmp column
                sql_fields_current[key] = sql_fields[key]
                t = tablename
                tt = sql_fields_aux[key]['sql'].replace(', ', new_add)
                if not self.dbengine in ('firebird',):
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP COLUMN %s__tmp;' % (t, key)]
                else:
                    query = ['ALTER TABLE %s ADD %s__tmp %s;' % (t, key, tt),
                             'UPDATE %s SET %s__tmp=%s;' % (t, key, key),
                             'ALTER TABLE %s DROP %s;' % (t, key),
                             'ALTER TABLE %s ADD %s %s;' % (t, key, tt),
                             'UPDATE %s SET %s=%s__tmp;' % (t, key, key),
                             'ALTER TABLE %s DROP %s__tmp;' % (t, key)]
                metadata_change = True
            elif sql_fields[key]['type'] != sql_fields_old[key]['type']:
                # only the logical type changed: metadata update is enough
                sql_fields_current[key] = sql_fields[key]
                metadata_change = True

            if query:
                logfile.write('timestamp: %s\n'
                              % datetime.datetime.today().isoformat())
                db['_lastsql'] = '\n'.join(query)
                for sub_query in query:
                    logfile.write(sub_query + '\n')
                    if not fake_migrate:
                        self.execute(sub_query)
                        # Caveat: mysql, oracle and firebird do not allow multiple alter table
                        # in one transaction so we must commit partial transactions and
                        # update table._dbt after alter table.
                        if db._adapter.commit_on_alter_table:
                            db.commit()
                            tfile = self.file_open(table._dbt, 'w')
                            pickle.dump(sql_fields_current, tfile)
                            self.file_close(tfile)
                            logfile.write('success!\n')
                    else:
                        logfile.write('faked!\n')
            elif metadata_change:
                tfile = self.file_open(table._dbt, 'w')
                pickle.dump(sql_fields_current, tfile)
                self.file_close(tfile)

        if metadata_change and \
                not (query and self.dbengine in ('mysql','oracle','firebird')):
            db.commit()
            tfile = self.file_open(table._dbt, 'w')
            pickle.dump(sql_fields_current, tfile)
            self.file_close(tfile)
1114 - def LOWER(self, first):
1115 return 'LOWER(%s)' % self.expand(first)
1116
1117 - def UPPER(self, first):
1118 return 'UPPER(%s)' % self.expand(first)
1119
1120 - def COUNT(self, first, distinct=None):
1121 return ('COUNT(%s)' if not distinct else 'COUNT(DISTINCT %s)') \ 1122 % self.expand(first)
1123
1124 - def EXTRACT(self, first, what):
1125 return "EXTRACT(%s FROM %s)" % (what, self.expand(first))
1126
1127 - def EPOCH(self, first):
1128 return self.EXTRACT(first, 'epoch')
1129
1130 - def LENGTH(self, first):
1131 return "LENGTH(%s)" % self.expand(first)
1132
1133 - def AGGREGATE(self, first, what):
1134 return "%s(%s)" % (what, self.expand(first))
1135
1136 - def JOIN(self):
1137 return 'JOIN'
1138
1139 - def LEFT_JOIN(self):
1140 return 'LEFT JOIN'
1141
1142 - def RANDOM(self):
1143 return 'Random()'
1144
1145 - def NOT_NULL(self, default, field_type):
1146 return 'NOT NULL DEFAULT %s' % self.represent(default,field_type)
1147
1148 - def COALESCE(self, first, second):
1149 expressions = [self.expand(first)]+[self.expand(e) for e in second] 1150 return 'COALESCE(%s)' % ','.join(expressions)
1151
1152 - def COALESCE_ZERO(self, first):
1153 return 'COALESCE(%s,0)' % self.expand(first)
1154
1155 - def RAW(self, first):
1156 return first
1157
1158 - def ALLOW_NULL(self):
1159 return ''
1160
1161 - def SUBSTRING(self, field, parameters):
1162 return 'SUBSTR(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])
1163
1164 - def PRIMARY_KEY(self, key):
1165 return 'PRIMARY KEY(%s)' % key
1166
1167 - def _drop(self, table, mode):
1168 return ['DROP TABLE %s;' % table]
1169
    def drop(self, table, mode=''):
        """
        Execute the DROP TABLE statement(s) for *table*, commit, and
        unregister the table from the DAL instance and migration metadata.

        :param table: Table object to drop
        :param mode: backend-specific drop mode, forwarded to _drop
        """
        db = table._db
        # table._dbt (the .table metadata path) is set only when migrations
        # are enabled; logging follows it
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        queries = self._drop(table, mode)
        for query in queries:
            if table._dbt:
                logfile.write(query + '\n')
            self.execute(query)
        db.commit()
        # remove the table from the DAL object and its table list
        del db[table._tablename]
        del db.tables[db.tables.index(table._tablename)]
        db._remove_references_to(table)
        if table._dbt:
            # delete the migration metadata file as well
            self.file_delete(table._dbt)
            logfile.write('success!\n')
1186
1187 - def _insert(self, table, fields):
1188 if fields: 1189 keys = ','.join(f.name for f, v in fields) 1190 values = ','.join(self.expand(v, f.type) for f, v in fields) 1191 return 'INSERT INTO %s(%s) VALUES (%s);' % (table, keys, values) 1192 else: 1193 return self._insert_empty(table)
1194
1195 - def _insert_empty(self, table):
1196 return 'INSERT INTO %s DEFAULT VALUES;' % table
1197
    def insert(self, table, fields):
        """
        Insert one row.  Returns a Reference to the new record, a dict of
        primary-key values for keyed tables, or None on an integrity error
        (e.g. a duplicate key).
        """
        query = self._insert(table,fields)
        try:
            self.execute(query)
        except Exception:
            e = sys.exc_info()[1]
            # integrity errors are signalled to the caller with None
            if isinstance(e,self.integrity_error_class()):
                return None
            raise e
        if hasattr(table,'_primarykey'):
            # keyed table: echo back the inserted primary-key values
            return dict([(k[0].name, k[1]) for k in fields \
                             if k[0].name in table._primarykey])
        id = self.lastrowid(table)
        if not isinstance(id,int):
            return id
        # wrap the integer id in a lazy Reference bound to the table
        rid = Reference(id)
        (rid._table, rid._record) = (table, None)
        return rid
1216
1217 - def bulk_insert(self, table, items):
1218 return [self.insert(table,item) for item in items]
1219
1220 - def NOT(self, first):
1221 return '(NOT %s)' % self.expand(first)
1222
1223 - def AND(self, first, second):
1224 return '(%s AND %s)' % (self.expand(first), self.expand(second))
1225
1226 - def OR(self, first, second):
1227 return '(%s OR %s)' % (self.expand(first), self.expand(second))
1228
1229 - def BELONGS(self, first, second):
1230 if isinstance(second, str): 1231 return '(%s IN (%s))' % (self.expand(first), second[:-1]) 1232 elif not second: 1233 return '(1=0)' 1234 items = ','.join(self.expand(item, first.type) for item in second) 1235 return '(%s IN (%s))' % (self.expand(first), items)
1236
1237 - def REGEXP(self, first, second):
1238 "regular expression operator" 1239 raise NotImplementedError
1240
1241 - def LIKE(self, first, second):
1242 "case sensitive like operator" 1243 raise NotImplementedError
1244
1245 - def ILIKE(self, first, second):
1246 "case in-sensitive like operator" 1247 return '(%s LIKE %s)' % (self.expand(first), 1248 self.expand(second, 'string'))
1249
1250 - def STARTSWITH(self, first, second):
1251 return '(%s LIKE %s)' % (self.expand(first), 1252 self.expand(second+'%', 'string'))
1253
1254 - def ENDSWITH(self, first, second):
1255 return '(%s LIKE %s)' % (self.expand(first), 1256 self.expand('%'+second, 'string'))
1257
    def CONTAINS(self,first,second,case_sensitive=False):
        """
        LIKE-based containment test: substring match for string/text/json
        fields, element match (using the '|item|' bar encoding) for list:
        fields.  '%' (and '|' for lists) in *second* are escaped first.
        """
        if first.type in ('string','text', 'json'):
            second = Expression(None,self.CONCAT('%',Expression(
                        None,self.REPLACE(second,('%','%%'))),'%'))
        elif first.type.startswith('list:'):
            second = Expression(None,self.CONCAT('%|',Expression(None,self.REPLACE(
                            Expression(None,self.REPLACE(second,('%','%%'))),('|','||'))),'|%'))
        # case_sensitive selects LIKE; otherwise the case-insensitive ILIKE
        op = case_sensitive and self.LIKE or self.ILIKE
        return op(first,second)
1268 - def EQ(self, first, second=None):
1269 if second is None: 1270 return '(%s IS NULL)' % self.expand(first) 1271 return '(%s = %s)' % (self.expand(first), 1272 self.expand(second, first.type))
1273
1274 - def NE(self, first, second=None):
1275 if second is None: 1276 return '(%s IS NOT NULL)' % self.expand(first) 1277 return '(%s <> %s)' % (self.expand(first), 1278 self.expand(second, first.type))
1279
1280 - def LT(self,first,second=None):
1281 if second is None: 1282 raise RuntimeError("Cannot compare %s < None" % first) 1283 return '(%s < %s)' % (self.expand(first), 1284 self.expand(second,first.type))
1285
1286 - def LE(self,first,second=None):
1287 if second is None: 1288 raise RuntimeError("Cannot compare %s <= None" % first) 1289 return '(%s <= %s)' % (self.expand(first), 1290 self.expand(second,first.type))
1291
1292 - def GT(self,first,second=None):
1293 if second is None: 1294 raise RuntimeError("Cannot compare %s > None" % first) 1295 return '(%s > %s)' % (self.expand(first), 1296 self.expand(second,first.type))
1297
1298 - def GE(self,first,second=None):
1299 if second is None: 1300 raise RuntimeError("Cannot compare %s >= None" % first) 1301 return '(%s >= %s)' % (self.expand(first), 1302 self.expand(second,first.type))
1303
1304 - def is_numerical_type(self, ftype):
1305 return ftype in ('integer','boolean','double','bigint') or \ 1306 ftype.startswith('decimal')
1307
    def REPLACE(self, first, (second, third)):
        """SQL REPLACE(haystack, needle, replacement); all args expanded as strings.

        Note: the (second, third) tuple parameter is Python-2-only syntax;
        the pair arrives as one argument from CONTAINS and Expression ops.
        """
        return 'REPLACE(%s,%s,%s)' % (self.expand(first,'string'),
                                      self.expand(second,'string'),
                                      self.expand(third,'string'))
1312
1313 - def CONCAT(self, *items):
1314 return '(%s)' % ' || '.join(self.expand(x,'string') for x in items)
1315
1316 - def ADD(self, first, second):
1317 if self.is_numerical_type(first.type): 1318 return '(%s + %s)' % (self.expand(first), 1319 self.expand(second, first.type)) 1320 else: 1321 return self.CONCAT(first, second)
1322
1323 - def SUB(self, first, second):
1324 return '(%s - %s)' % (self.expand(first), 1325 self.expand(second, first.type))
1326
1327 - def MUL(self, first, second):
1328 return '(%s * %s)' % (self.expand(first), 1329 self.expand(second, first.type))
1330
1331 - def DIV(self, first, second):
1332 return '(%s / %s)' % (self.expand(first), 1333 self.expand(second, first.type))
1334
1335 - def MOD(self, first, second):
1336 return '(%s %% %s)' % (self.expand(first), 1337 self.expand(second, first.type))
1338
1339 - def AS(self, first, second):
1340 return '%s AS %s' % (self.expand(first), second)
1341
1342 - def ON(self, first, second):
1343 if use_common_filters(second): 1344 second = self.common_filter(second,[first._tablename]) 1345 return '%s ON %s' % (self.expand(first), self.expand(second))
1346
1347 - def INVERT(self, first):
1348 return '%s DESC' % self.expand(first)
1349
1350 - def COMMA(self, first, second):
1351 return '%s, %s' % (self.expand(first), self.expand(second))
1352
    def expand(self, expression, field_type=None):
        """
        Recursively render *expression* (Field, Expression/Query, raw SQL
        string, list/tuple, bool, or plain literal) into its SQL string
        form; plain literals are converted via represent() using
        *field_type* when given.
        """
        if isinstance(expression, Field):
            return '%s.%s' % (expression.tablename, expression.name)
        elif isinstance(expression, (Expression, Query)):
            first = expression.first
            second = expression.second
            op = expression.op
            optional_args = expression.optional_args or {}
            # dispatch on arity: binary op, unary op, raw SQL string, nullary op
            if not second is None:
                return op(first, second, **optional_args)
            elif not first is None:
                return op(first,**optional_args)
            elif isinstance(op, str):
                if op.endswith(';'):
                    op=op[:-1]
                return '(%s)' % op
            else:
                return op()
        elif field_type:
            return str(self.represent(expression,field_type))
        elif isinstance(expression,(list,tuple)):
            return ','.join(self.represent(item,field_type) \
                                for item in expression)
        elif isinstance(expression, bool):
            return '1' if expression else '0'
        else:
            return str(expression)
1380
1381 - def alias(self, table, alias):
1382 """ 1383 Given a table object, makes a new table object 1384 with alias name. 1385 """ 1386 other = copy.copy(table) 1387 other['_ot'] = other._tablename 1388 other['ALL'] = SQLALL(other) 1389 other['_tablename'] = alias 1390 for fieldname in other.fields: 1391 other[fieldname] = copy.copy(other[fieldname]) 1392 other[fieldname]._tablename = alias 1393 other[fieldname].tablename = alias 1394 other[fieldname].table = other 1395 table._db[alias] = other 1396 return other
1397
1398 - def _truncate(self, table, mode=''):
1399 tablename = table._tablename 1400 return ['TRUNCATE TABLE %s %s;' % (tablename, mode or '')]
1401
    def truncate(self, table, mode= ' '):
        """
        Execute TRUNCATE on *table*, committing on success and logging each
        statement to the migration log (a no-op logger is used when
        migrations are disabled, i.e. table._dbt is unset).
        """
        # Prepare functions "write_to_logfile" and "close_logfile"
        if table._dbt:
            logfile = self.file_open(table._loggername, 'a')
        else:
            class Logfile(object):
                # stand-in logger: swallow writes when migrations are off
                def write(self, value):
                    pass
                def close(self):
                    pass
            logfile = Logfile()

        try:
            queries = table._db._adapter._truncate(table, mode)
            for query in queries:
                logfile.write(query + '\n')
                self.execute(query)
            table._db.commit()
            logfile.write('success!\n')
        finally:
            # the log handle is released even when execution fails
            logfile.close()
1424 - def _update(self, tablename, query, fields):
1425 if query: 1426 if use_common_filters(query): 1427 query = self.common_filter(query, [tablename]) 1428 sql_w = ' WHERE ' + self.expand(query) 1429 else: 1430 sql_w = '' 1431 sql_v = ','.join(['%s=%s' % (field.name, 1432 self.expand(value, field.type)) \ 1433 for (field, value) in fields]) 1434 return 'UPDATE %s SET %s%s;' % (tablename, sql_v, sql_w)
1435
1436 - def update(self, tablename, query, fields):
1437 sql = self._update(tablename, query, fields) 1438 self.execute(sql) 1439 try: 1440 return self.cursor.rowcount 1441 except: 1442 return None
1443
1444 - def _delete(self, tablename, query):
1445 if query: 1446 if use_common_filters(query): 1447 query = self.common_filter(query, [tablename]) 1448 sql_w = ' WHERE ' + self.expand(query) 1449 else: 1450 sql_w = '' 1451 return 'DELETE FROM %s%s;' % (tablename, sql_w)
1452
    def delete(self, tablename, query):
        """
        Execute a DELETE for *query* and return the number of deleted rows
        (None if the driver does not report it).  On SQLite/SpatiaLite the
        ON DELETE CASCADE semantics are emulated in Python by pre-fetching
        the doomed ids and recursively deleting referencing rows.
        """
        sql = self._delete(tablename, query)
        ### special code to handle CASCADE in SQLite & SpatiaLite
        db = self.db
        table = db[tablename]
        if self.dbengine in ('sqlite', 'spatialite') and table._referenced_by:
            # snapshot the ids before they disappear
            deleted = [x[table._id.name] for x in db(query).select(table._id)]
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        self.execute(sql)
        try:
            counter = self.cursor.rowcount
        except:
            counter = None
        ### special code to handle CASCADE in SQLite & SpatiaLite
        if self.dbengine in ('sqlite', 'spatialite') and counter:
            for field in table._referenced_by:
                if field.type=='reference '+table._tablename \
                        and field.ondelete=='CASCADE':
                    db(field.belongs(deleted)).delete()
        ### end special code to handle CASCADE in SQLite & SpatiaLite
        return counter
1474
1475 - def get_table(self, query):
1476 tablenames = self.tables(query) 1477 if len(tablenames)==1: 1478 return tablenames[0] 1479 elif len(tablenames)<1: 1480 raise RuntimeError("No table selected") 1481 else: 1482 raise RuntimeError("Too many tables selected")
1483
    def expand_all(self, fields, tablenames):
        """
        Normalize a select field list: SQLALL entries expand to all fields
        of their table, 'table.field' strings resolve to Field objects,
        other strings become raw-SQL Expressions, and an empty list means
        "every field of every table in *tablenames*".
        """
        db = self.db
        new_fields = []
        append = new_fields.append
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            elif isinstance(item,str):
                if REGEX_TABLE_DOT_FIELD.match(item):
                    tablename,fieldname = item.split('.')
                    append(db[tablename][fieldname])
                else:
                    # arbitrary string: wrap as a raw SQL expression
                    append(Expression(db,lambda item=item:item))
            else:
                append(item)
        # ## if no fields specified take them all from the requested tables
        if not new_fields:
            for table in tablenames:
                for field in db[table]:
                    append(field)
        return new_fields
1505
    def _select(self, query, fields, attributes):
        """
        Build (but do not execute) the SELECT statement for *query* over
        *fields*, honoring the select attributes (join, left, distinct,
        groupby, having, orderby, limitby, orderby_on_limitby, for_update).

        Raises SyntaxError for unknown attributes or when no table can be
        inferred.  Side effect: sets self._colnames to the expanded column
        names used later by the row parser.
        """
        tables = self.tables
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        args_get = attributes.get
        tablenames = tables(query)
        tablenames_for_common_filters = tablenames
        for field in fields:
            # 'table.field' strings are resolved to Field objects
            if isinstance(field, basestring) \
                    and REGEX_TABLE_DOT_FIELD.match(field):
                tn,fn = field.split('.')
                field = self.db[tn][fn]
            for tablename in tables(field):
                if not tablename in tablenames:
                    tablenames.append(tablename)

        if len(tablenames) < 1:
            raise SyntaxError('Set: no tables selected')
        self._colnames = map(self.expand, fields)
        def geoexpand(field):
            # geometry columns are selected as WKT text
            if isinstance(field.type,str) and field.type.startswith('geometry'):
                field = field.st_astext()
            return self.expand(field)
        sql_f = ', '.join(map(geoexpand, fields))
        sql_o = ''
        sql_s = ''
        left = args_get('left', False)
        inner_join = args_get('join', False)
        distinct = args_get('distinct', False)
        groupby = args_get('groupby', False)
        orderby = args_get('orderby', False)
        having = args_get('having', False)
        limitby = args_get('limitby', False)
        orderby_on_limitby = args_get('orderby_on_limitby', True)
        for_update = args_get('for_update', False)
        if self.can_select_for_update is False and for_update is True:
            raise SyntaxError('invalid select attribute: for_update')
        if distinct is True:
            sql_s += 'DISTINCT'
        elif distinct:
            sql_s += 'DISTINCT ON (%s)' % distinct
        if inner_join:
            icommand = self.JOIN()
            if not isinstance(inner_join, (tuple, list)):
                inner_join = [inner_join]
            # split plain tables from ON-expressions
            ijoint = [t._tablename for t in inner_join
                      if not isinstance(t,Expression)]
            ijoinon = [t for t in inner_join if isinstance(t, Expression)]
            itables_to_merge={} #issue 490
            [itables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in ijoinon]
            ijoinont = [t.first._tablename for t in ijoinon]
            [itables_to_merge.pop(t) for t in ijoinont
             if t in itables_to_merge] #issue 490
            iimportant_tablenames = ijoint + ijoinont + itables_to_merge.keys()
            iexcluded = [t for t in tablenames
                         if not t in iimportant_tablenames]
        if left:
            join = attributes['left']
            command = self.LEFT_JOIN()
            if not isinstance(join, (tuple, list)):
                join = [join]
            joint = [t._tablename for t in join
                     if not isinstance(t, Expression)]
            joinon = [t for t in join if isinstance(t, Expression)]
            #patch join+left patch (solves problem with ordering in left joins)
            tables_to_merge={}
            [tables_to_merge.update(
                    dict.fromkeys(tables(t))) for t in joinon]
            joinont = [t.first._tablename for t in joinon]
            [tables_to_merge.pop(t) for t in joinont if t in tables_to_merge]
            # common filters must not apply to left-joined tables
            tablenames_for_common_filters = [t for t in tablenames
                                             if not t in joinont ]
            important_tablenames = joint + joinont + tables_to_merge.keys()
            excluded = [t for t in tablenames
                        if not t in important_tablenames ]
        else:
            excluded = tablenames

        if use_common_filters(query):
            query = self.common_filter(query,tablenames_for_common_filters)
        sql_w = ' WHERE ' + self.expand(query) if query else ''

        def alias(t):
            return str(self.db[t])
        # FROM clause: four shapes depending on join/left combination
        if inner_join and not left:
            sql_t = ', '.join([alias(t) for t in iexcluded + \
                                   itables_to_merge.keys()])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
        elif not inner_join and left:
            sql_t = ', '.join([alias(t) for t in excluded + \
                                   tables_to_merge.keys()])
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        elif inner_join and left:
            all_tables_in_query = set(important_tablenames + \
                                      iimportant_tablenames + \
                                      tablenames)
            tables_in_joinon = set(joinont + ijoinont)
            tables_not_in_joinon = \
                all_tables_in_query.difference(tables_in_joinon)
            sql_t = ','.join([alias(t) for t in tables_not_in_joinon])
            for t in ijoinon:
                sql_t += ' %s %s' % (icommand, str(t))
            if joint:
                sql_t += ' %s %s' % (command, ','.join([t for t in joint]))
            for t in joinon:
                sql_t += ' %s %s' % (command, str(t))
        else:
            sql_t = ', '.join(alias(t) for t in tablenames)
        if groupby:
            if isinstance(groupby, (list, tuple)):
                groupby = xorify(groupby)
            sql_o += ' GROUP BY %s' % self.expand(groupby)
            if having:
                sql_o += ' HAVING %s' % attributes['having']
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)
            if str(orderby) == '<random>':
                sql_o += ' ORDER BY %s' % self.RANDOM()
            else:
                sql_o += ' ORDER BY %s' % self.expand(orderby)
        if limitby:
            if orderby_on_limitby and not orderby and tablenames:
                # deterministic paging: default ORDER BY on the primary keys
                sql_o += ' ORDER BY %s' % ', '.join(['%s.%s'%(t,x) for t in tablenames for x in (hasattr(self.db[t],'_primarykey') and self.db[t]._primarykey or [self.db[t]._id.name])])
        # oracle does not support limitby
        sql = self.select_limitby(sql_s, sql_f, sql_t, sql_w, sql_o, limitby)
        if for_update and self.can_select_for_update is True:
            sql = sql.rstrip(';') + ' FOR UPDATE;'
        return sql
1641 - def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
1642 if limitby: 1643 (lmin, lmax) = limitby 1644 sql_o += ' LIMIT %i OFFSET %i' % (lmax - lmin, lmin) 1645 return 'SELECT %s %s FROM %s%s%s;' % \ 1646 (sql_s, sql_f, sql_t, sql_w, sql_o)
1647
1648 - def _fetchall(self):
1649 return self.cursor.fetchall()
1650
    def _select_aux(self,sql,fields,attributes):
        """
        Execute *sql* (or serve its rows from the 'cache' attribute), slice
        off the limitby offset for backends without native OFFSET, and feed
        the raw rows through the processor (self.parse by default).
        """
        args_get = attributes.get
        cache = args_get('cache',None)
        if not cache:
            self.execute(sql)
            rows = self._fetchall()
        else:
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql + '/rows'
            # cache keys longer than 200 chars are hashed
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            def _select_aux2():
                self.execute(sql)
                return self._fetchall()
            rows = cache_model(key,_select_aux2,time_expire)
        if isinstance(rows,tuple):
            rows = list(rows)
        limitby = args_get('limitby', None) or (0,)
        # rowslice is a no-op here; adapters without SQL slicing override it
        rows = self.rowslice(rows,limitby[0],None)
        processor = args_get('processor',self.parse)
        cacheable = args_get('cacheable',False)
        return processor(rows,fields,self._colnames,cacheable=cacheable)
    def select(self, query, fields, attributes):
        """
        Always returns a Rows object, possibly empty.

        When both 'cache' and 'cacheable' attributes are given, the whole
        processed Rows object is cached under a key derived from the SQL;
        otherwise caching (if any) happens at the raw-row level inside
        _select_aux.
        """
        sql = self._select(query, fields, attributes)
        cache = attributes.get('cache', None)
        if cache and attributes.get('cacheable',False):
            del attributes['cache']
            (cache_model, time_expire) = cache
            key = self.uri + '/' + sql
            # cache keys longer than 200 chars are hashed
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            args = (sql,fields,attributes)
            return cache_model(
                key,
                lambda self=self,args=args:self._select_aux(*args),
                time_expire)
        else:
            return self._select_aux(sql,fields,attributes)
1691
1692 - def _count(self, query, distinct=None):
1693 tablenames = self.tables(query) 1694 if query: 1695 if use_common_filters(query): 1696 query = self.common_filter(query, tablenames) 1697 sql_w = ' WHERE ' + self.expand(query) 1698 else: 1699 sql_w = '' 1700 sql_t = ','.join(tablenames) 1701 if distinct: 1702 if isinstance(distinct,(list, tuple)): 1703 distinct = xorify(distinct) 1704 sql_d = self.expand(distinct) 1705 return 'SELECT count(DISTINCT %s) FROM %s%s;' % \ 1706 (sql_d, sql_t, sql_w) 1707 return 'SELECT count(*) FROM %s%s;' % (sql_t, sql_w)
1708
1709 - def count(self, query, distinct=None):
1710 self.execute(self._count(query, distinct)) 1711 return self.cursor.fetchone()[0]
1712
1713 - def tables(self, *queries):
1714 tables = set() 1715 for query in queries: 1716 if isinstance(query, Field): 1717 tables.add(query.tablename) 1718 elif isinstance(query, (Expression, Query)): 1719 if not query.first is None: 1720 tables = tables.union(self.tables(query.first)) 1721 if not query.second is None: 1722 tables = tables.union(self.tables(query.second)) 1723 return list(tables)
1724
1725 - def commit(self):
1726 if self.connection: return self.connection.commit()
1727
1728 - def rollback(self):
1729 if self.connection: return self.connection.rollback()
1730
1731 - def close_connection(self):
1732 if self.connection: return self.connection.close()
1733
1734 - def distributed_transaction_begin(self, key):
1735 return
1736
1737 - def prepare(self, key):
1738 if self.connection: self.connection.prepare()
1739
1740 - def commit_prepared(self, key):
1741 if self.connection: self.connection.commit()
1742
1743 - def rollback_prepared(self, key):
1744 if self.connection: self.connection.rollback()
1745
1746 - def concat_add(self, tablename):
1747 return ', ADD '
1748
1749 - def constraint_name(self, table, fieldname):
1750 return '%s_%s__constraint' % (table,fieldname)
1751
1752 - def create_sequence_and_triggers(self, query, table, **args):
1753 self.execute(query)
1754
    def log_execute(self, *a, **b):
        """
        Execute a[0] on the cursor while recording the SQL in db._lastsql,
        optionally LOGGER.debug-ing it, and appending (sql, elapsed) to
        db._timings (trimmed to the last TIMINGSSIZE entries).
        Returns None without executing when there is no connection.
        """
        if not self.connection: return None
        command = a[0]
        if self.db._debug:
            LOGGER.debug('SQL: %s' % command)
        self.db._lastsql = command
        t0 = time.time()
        ret = self.cursor.execute(*a, **b)
        self.db._timings.append((command,time.time()-t0))
        # keep only the most recent TIMINGSSIZE timings
        del self.db._timings[:-TIMINGSSIZE]
        return ret
1766
1767 - def execute(self, *a, **b):
1768 return self.log_execute(*a, **b)
1769
1770 - def represent(self, obj, fieldtype):
1771 field_is_type = fieldtype.startswith 1772 if isinstance(obj, CALLABLETYPES): 1773 obj = obj() 1774 if isinstance(fieldtype, SQLCustomType): 1775 value = fieldtype.encoder(obj) 1776 if fieldtype.type in ('string','text', 'json'): 1777 return self.adapt(value) 1778 return value 1779 if isinstance(obj, (Expression, Field)): 1780 return str(obj) 1781 if field_is_type('list:'): 1782 if not obj: 1783 obj = [] 1784 elif not isinstance(obj, (list, tuple)): 1785 obj = [obj] 1786 if field_is_type('list:string'): 1787 obj = map(str,obj) 1788 else: 1789 obj = map(int,obj) 1790 # we don't want to bar_encode json objects 1791 if isinstance(obj, (list, tuple)) and (not fieldtype == "json"): 1792 obj = bar_encode(obj) 1793 if obj is None: 1794 return 'NULL' 1795 if obj == '' and not fieldtype[:2] in ['st', 'te', 'js', 'pa', 'up']: 1796 return 'NULL' 1797 r = self.represent_exceptions(obj, fieldtype) 1798 if not r is None: 1799 return r 1800 if fieldtype == 'boolean': 1801 if obj and not str(obj)[:1].upper() in '0F': 1802 return self.smart_adapt(self.TRUE) 1803 else: 1804 return self.smart_adapt(self.FALSE) 1805 if fieldtype == 'id' or fieldtype == 'integer': 1806 return str(int(obj)) 1807 if field_is_type('decimal'): 1808 return str(obj) 1809 elif field_is_type('reference'): # reference 1810 if fieldtype.find('.')>0: 1811 return repr(obj) 1812 elif isinstance(obj, (Row, Reference)): 1813 return str(obj['id']) 1814 return str(int(obj)) 1815 elif fieldtype == 'double': 1816 return repr(float(obj)) 1817 if isinstance(obj, unicode): 1818 obj = obj.encode(self.db_codec) 1819 if fieldtype == 'blob': 1820 obj = base64.b64encode(str(obj)) 1821 elif fieldtype == 'date': 1822 if isinstance(obj, (datetime.date, datetime.datetime)): 1823 obj = obj.isoformat()[:10] 1824 else: 1825 obj = str(obj) 1826 elif fieldtype == 'datetime': 1827 if isinstance(obj, datetime.datetime): 1828 obj = obj.isoformat(self.T_SEP)[:19] 1829 elif isinstance(obj, datetime.date): 1830 obj = obj.isoformat()[:10]+' 
00:00:00' 1831 else: 1832 obj = str(obj) 1833 elif fieldtype == 'time': 1834 if isinstance(obj, datetime.time): 1835 obj = obj.isoformat()[:10] 1836 else: 1837 obj = str(obj) 1838 elif fieldtype == 'json': 1839 if not self.native_json: 1840 if have_serializers: 1841 obj = serializers.json(obj) 1842 elif simplejson: 1843 obj = simplejson.dumps(items) 1844 else: 1845 raise RuntimeError("missing simplejson") 1846 if not isinstance(obj,bytes): 1847 obj = bytes(obj) 1848 try: 1849 obj.decode(self.db_codec) 1850 except: 1851 obj = obj.decode('latin1').encode(self.db_codec) 1852 return self.adapt(obj)
1853
1854 - def represent_exceptions(self, obj, fieldtype):
1855 return None
1856
1857 - def lastrowid(self, table):
1858 return None
1859
1860 - def integrity_error_class(self):
1861 return type(None)
1862
1863 - def rowslice(self, rows, minimum=0, maximum=None):
1864 """ 1865 By default this function does nothing; 1866 overload when db does not do slicing. 1867 """ 1868 return rows
1869
    def parse_value(self, value, field_type, blob_decode=True):
        """
        Convert a raw DB value into its Python form using the parser that
        matches *field_type* (see build_parsemap).  String/text-like types,
        geometry and undecoded blobs pass through unchanged.
        """
        # decode driver byte strings with the configured codec, best effort
        if field_type != 'blob' and isinstance(value, str):
            try:
                value = value.decode(self.db._db_codec)
            except Exception:
                pass
        if isinstance(value, unicode):
            value = value.encode('utf-8')
        if isinstance(field_type, SQLCustomType):
            value = field_type.decoder(value)
        if not isinstance(field_type, str) or value is None:
            return value
        elif field_type in ('string', 'text', 'password', 'upload', 'dict'):
            return value
        elif field_type.startswith('geo'):
            return value
        elif field_type == 'blob' and not blob_decode:
            return value
        else:
            # dispatch on the type's leading keyword (e.g. 'decimal(10,2)' -> 'decimal')
            key = REGEX_TYPE.match(field_type).group(0)
            return self.parsemap[key](value,field_type)
1891
1892 - def parse_reference(self, value, field_type):
1893 referee = field_type[10:].strip() 1894 if not '.' in referee: 1895 value = Reference(value) 1896 value._table, value._record = self.db[referee], None 1897 return value
1898
1899 - def parse_boolean(self, value, field_type):
1900 return value == True or str(value)[:1].lower() == 't'
1901
1902 - def parse_date(self, value, field_type):
1903 if isinstance(value, datetime.datetime): 1904 return value.date() 1905 if not isinstance(value, (datetime.date,datetime.datetime)): 1906 (y, m, d) = map(int, str(value)[:10].strip().split('-')) 1907 value = datetime.date(y, m, d) 1908 return value
1909
1910 - def parse_time(self, value, field_type):
1911 if not isinstance(value, datetime.time): 1912 time_items = map(int,str(value)[:8].strip().split(':')[:3]) 1913 if len(time_items) == 3: 1914 (h, mi, s) = time_items 1915 else: 1916 (h, mi, s) = time_items + [0] 1917 value = datetime.time(h, mi, s) 1918 return value
1919
    def parse_datetime(self, value, field_type):
        """
        Coerce a DB value into datetime.datetime.

        Accepts 'YYYY-MM-DD[ T]HH:MM:SS[+HH:MM|-HH:MM]' strings; an explicit
        UTC offset is folded into the returned (naive) value.
        """
        if not isinstance(value, datetime.datetime):
            value = str(value)
            date_part,time_part,timezone = value[:10],value[11:19],value[19:]
            if '+' in timezone:
                ms,tz = timezone.split('+')
                h,m = tz.split(':')
                dt = datetime.timedelta(seconds=3600*int(h)+60*int(m))
            elif '-' in timezone:
                ms,tz = timezone.split('-')
                h,m = tz.split(':')
                dt = -datetime.timedelta(seconds=3600*int(h)+60*int(m))
            else:
                dt = None
            (y, m, d) = map(int,date_part.split('-'))
            # missing time components default to zero
            time_parts = time_part and time_part.split(':')[:3] or (0,0,0)
            while len(time_parts)<3: time_parts.append(0)
            time_items = map(int,time_parts)
            (h, mi, s) = time_items
            value = datetime.datetime(y, m, d, h, mi, s)
            if dt:
                value = value + dt
        return value
1943
1944 - def parse_blob(self, value, field_type):
1945 return base64.b64decode(str(value))
1946
1947 - def parse_decimal(self, value, field_type):
1948 decimals = int(field_type[8:-1].split(',')[-1]) 1949 if self.dbengine in ('sqlite', 'spatialite'): 1950 value = ('%.' + str(decimals) + 'f') % value 1951 if not isinstance(value, decimal.Decimal): 1952 value = decimal.Decimal(str(value)) 1953 return value
1954
1955 - def parse_list_integers(self, value, field_type):
1956 if not isinstance(self, NoSQLAdapter): 1957 value = bar_decode_integer(value) 1958 return value
1959
1960 - def parse_list_references(self, value, field_type):
1961 if not isinstance(self, NoSQLAdapter): 1962 value = bar_decode_integer(value) 1963 return [self.parse_reference(r, field_type[5:]) for r in value]
1964
1965 - def parse_list_strings(self, value, field_type):
1966 if not isinstance(self, NoSQLAdapter): 1967 value = bar_decode_string(value) 1968 return value
1969
1970 - def parse_id(self, value, field_type):
1971 return int(value)
1972
1973 - def parse_integer(self, value, field_type):
1974 return int(value)
1975
1976 - def parse_double(self, value, field_type):
1977 return float(value)
1978
1979 - def parse_json(self, value, field_type):
1980 if not self.native_json: 1981 if not isinstance(value, basestring): 1982 raise RuntimeError('json data not a string') 1983 if isinstance(value, unicode): 1984 value = value.encode('utf-8') 1985 if have_serializers: 1986 value = serializers.loads_json(value) 1987 elif simplejson: 1988 value = simplejson.loads(value) 1989 else: 1990 raise RuntimeError("missing simplejson") 1991 return value
1992
1993 - def build_parsemap(self):
1994 self.parsemap = { 1995 'id':self.parse_id, 1996 'integer':self.parse_integer, 1997 'bigint':self.parse_integer, 1998 'float':self.parse_double, 1999 'double':self.parse_double, 2000 'reference':self.parse_reference, 2001 'boolean':self.parse_boolean, 2002 'date':self.parse_date, 2003 'time':self.parse_time, 2004 'datetime':self.parse_datetime, 2005 'blob':self.parse_blob, 2006 'decimal':self.parse_decimal, 2007 'json':self.parse_json, 2008 'list:integer':self.parse_list_integers, 2009 'list:reference':self.parse_list_references, 2010 'list:string':self.parse_list_strings, 2011 }
2012
    def parse(self, rows, fields, colnames, blob_decode=True,
              cacheable = False):
        # Convert raw DB rows into a Rows object of nested Row records
        # (one sub-Row per table), parsing each value per its field type
        # and attaching update_record/delete_record helpers and virtual
        # fields. `cacheable` skips the per-record helpers so the result
        # can be pickled/cached.
        self.build_parsemap()
        db = self.db
        virtualtables = []
        new_rows = []
        tmps = []
        # pre-resolve "table.field" column names once, outside the row loop
        for colname in colnames:
            if not REGEX_TABLE_DOT_FIELD.match(colname):
                tmps.append(None)
            else:
                (tablename, fieldname) = colname.split('.')
                table = db[tablename]
                field = table[fieldname]
                ft = field.type
                tmps.append((tablename,fieldname,table,field,ft))
        for (i,row) in enumerate(rows):
            new_row = Row()
            for (j,colname) in enumerate(colnames):
                value = row[j]
                tmp = tmps[j]
                if tmp:
                    (tablename,fieldname,table,field,ft) = tmp
                    # group parsed values under one sub-Row per table
                    if tablename in new_row:
                        colset = new_row[tablename]
                    else:
                        colset = new_row[tablename] = Row()
                        if tablename not in virtualtables:
                            virtualtables.append(tablename)
                    value = self.parse_value(value,ft,blob_decode)
                    if field.filter_out:
                        value = field.filter_out(value)
                    colset[fieldname] = value

                    # for backward compatibility
                    if ft=='id' and fieldname!='id' and \
                            not 'id' in table.fields:
                        colset['id'] = value

                    if ft == 'id' and not cacheable:
                        # temporary hack to deal with
                        # GoogleDatastoreAdapter
                        # references
                        if isinstance(self, GoogleDatastoreAdapter):
                            id = value.key().id_or_name()
                            colset[fieldname] = id
                            colset.gae_item = value
                        else:
                            id = value
                        colset.update_record = RecordUpdater(colset,table,id)
                        colset.delete_record = RecordDeleter(table,id)
                        # expose reverse references as lazy sets,
                        # named per db._referee_name pattern
                        for rfield in table._referenced_by:
                            referee_link = db._referee_name and \
                                db._referee_name % dict(
                                table=rfield.tablename,field=rfield.name)
                            if referee_link and not referee_link in colset:
                                colset[referee_link] = LazySet(rfield,id)
                else:
                    # column is an expression / aliased value, not
                    # table.field: stash it under row._extra
                    if not '_extra' in new_row:
                        new_row['_extra'] = Row()
                    new_row['_extra'][colname] = \
                        self.parse_value(value,
                                         fields[j].type,blob_decode)
                    # also expose "... AS name" columns as row.name
                    # (NOTE(review): uses the raw, unparsed value)
                    new_column_name = \
                        REGEX_SELECT_AS_PARSER.search(colname)
                    if not new_column_name is None:
                        column_name = new_column_name.groups(0)
                        setattr(new_row,column_name[0],value)
            new_rows.append(new_row)
        rowsobj = Rows(db, new_rows, colnames, rawrows=rows)

        for tablename in virtualtables:
            ### new style virtual fields
            table = db[tablename]
            fields_virtual = [(f,v) for (f,v) in table.iteritems()
                              if isinstance(v,FieldVirtual)]
            fields_lazy = [(f,v) for (f,v) in table.iteritems()
                           if isinstance(v,FieldMethod)]
            if fields_virtual or fields_lazy:
                for row in rowsobj.records:
                    box = row[tablename]
                    for f,v in fields_virtual:
                        box[f] = v.f(row)
                    for f,v in fields_lazy:
                        box[f] = (v.handler or VirtualCommand)(v.f,row)

            ### old style virtual fields
            for item in table.virtualfields:
                try:
                    rowsobj = rowsobj.setvirtualfields(**{tablename:item})
                except (KeyError, AttributeError):
                    # to avoid breaking virtualfields when partial select
                    pass
        return rowsobj
2107
2108 - def common_filter(self, query, tablenames):
2109 tenant_fieldname = self.db._request_tenant 2110 2111 for tablename in tablenames: 2112 table = self.db[tablename] 2113 2114 # deal with user provided filters 2115 if table._common_filter != None: 2116 query = query & table._common_filter(query) 2117 2118 # deal with multi_tenant filters 2119 if tenant_fieldname in table: 2120 default = table[tenant_fieldname].default 2121 if not default is None: 2122 newquery = table[tenant_fieldname] == default 2123 if query is None: 2124 query = newquery 2125 else: 2126 query = query & newquery 2127 return query
2128
2129 - def CASE(self,query,t,f):
2130 def represent(x): 2131 types = {type(True):'boolean',type(0):'integer',type(1.0):'double'} 2132 if x is None: return 'NULL' 2133 elif isinstance(x,Expression): return str(x) 2134 else: return self.represent(x,types.get(type(x),'string'))
2135 return Expression(self.db,'CASE WHEN %s THEN %s ELSE %s END' % \ 2136 (self.expand(query),represent(t),represent(f))) 2137
###################################################################################
# List of all the available adapters; they all extend BaseAdapter.
###################################################################################

class SQLiteAdapter(BaseAdapter):
    """Adapter for SQLite databases (sqlite2/sqlite3 drivers)."""
    drivers = ('sqlite2','sqlite3')

    can_select_for_update = None    # support ourselves with BEGIN TRANSACTION

    def EXTRACT(self,field,what):
        # delegate to the web2py_extract() SQL function registered in
        # after_connection()
        return "web2py_extract('%s',%s)" % (what, self.expand(field))

    @staticmethod
    def web2py_extract(lookup, s):
        """Extract a date/time component (or 'epoch') from an ISO
        'YYYY-MM-DD HH:MM:SS' string; returns None on any parse failure."""
        table = {
            'year': (0, 4),
            'month': (5, 7),
            'day': (8, 10),
            'hour': (11, 13),
            'minute': (14, 16),
            'second': (17, 19),
            }
        try:
            if lookup != 'epoch':
                (i, j) = table[lookup]
                return int(s[i:j])
            else:
                return time.mktime(datetime.datetime.strptime(
                    s, '%Y-%m-%d %H:%M:%S').timetuple())
        except Exception:
            # was a bare except: keep the best-effort contract but do
            # not swallow SystemExit/KeyboardInterrupt
            return None

    @staticmethod
    def web2py_regexp(expression, item):
        # backs the REGEXP SQL operator registered in after_connection()
        return re.compile(expression).search(item) is not None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0      # sqlite connections are not pooled
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://',1)[1]
            if dbpath[0] != '/':
                if PYTHON_VERSION == 2:
                    dbpath = pjoin(
                        self.folder.decode(path_encoding).encode('utf8'), dbpath)
                else:
                    dbpath = pjoin(self.folder, dbpath)
        # copy before mutating: driver_args is a shared mutable default
        # argument and must not accumulate per-connection settings
        driver_args = dict(driver_args)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # register the SQL helper functions used by EXTRACT and REGEXP
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    def _truncate(self, table, mode=''):
        # sqlite has no TRUNCATE; also reset the AUTOINCREMENT counter
        tablename = table._tablename
        return ['DELETE FROM %s;' % tablename,
                "DELETE FROM sqlite_sequence WHERE name='%s';" % tablename]

    def lastrowid(self, table):
        return self.cursor.lastrowid

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def select(self, query, fields, attributes):
        """
        Simulate SELECT ... FOR UPDATE with BEGIN IMMEDIATE TRANSACTION.
        Note that the entire database, rather than one record, is locked
        (it will be locked eventually anyway by the following UPDATE).
        """
        if attributes.get('for_update', False) and not 'cache' in attributes:
            self.execute('BEGIN IMMEDIATE TRANSACTION;')
        return super(SQLiteAdapter, self).select(query, fields, attributes)
2233
class SpatiaLiteAdapter(SQLiteAdapter):
    """SQLite adapter with the SpatiaLite GIS extension loaded."""
    drivers = ('sqlite3','sqlite2')

    types = copy.copy(BaseAdapter.types)
    types.update(geometry='GEOMETRY')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326, after_connection=None):
        self.db = db
        self.dbengine = "spatialite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = 0      # sqlite connections are not pooled
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        self.srid = srid
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('spatialite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://',1)[1]
            if dbpath[0] != '/':
                dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), dbpath)
        # copy before mutating: driver_args is a shared mutable default
        # argument and must not accumulate per-connection settings
        driver_args = dict(driver_args)
        if not 'check_same_thread' in driver_args:
            driver_args['check_same_thread'] = False
        if not 'detect_types' in driver_args and do_connect:
            driver_args['detect_types'] = self.driver.PARSE_DECLTYPES
        def connector(dbpath=dbpath, driver_args=driver_args):
            return self.driver.Connection(dbpath, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.enable_load_extension(True)
        # for Windows, rename libspatialite-2.dll to libspatialite.dll
        # Linux uses libspatialite.so
        # Mac OS X uses libspatialite.dylib
        libspatialite = SPATIALLIBS[platform.system()]
        self.execute(r'SELECT load_extension("%s");' % libspatialite)

        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)
        self.connection.create_function("REGEXP", 2,
                                        SQLiteAdapter.web2py_regexp)

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        return 'AsGeoJSON(%s,%s,%s)' %(self.expand(first),
                                       second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        return 'AsText(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        return 'Contains(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return 'Distance(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return 'Equals(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return 'Intersects(%s,%s)' %(self.expand(first),
                                     self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return 'Overlaps(%s,%s)' %(self.expand(first),
                                   self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        return 'Simplify(%s,%s)' %(self.expand(first),
                                   self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        return 'Touches(%s,%s)' %(self.expand(first),
                                  self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return 'Within(%s,%s)' %(self.expand(first),
                                 self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render geometry values as ST_GeomFromText(); defer otherwise."""
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # Spatialite default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2341
class JDBCSQLiteAdapter(SQLiteAdapter):
    # Jython adapter: connects to sqlite through the zxJDBC bridge.
    drivers = ('zxJDBC_sqlite',)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # Same URI handling as SQLiteAdapter, but the connector goes
        # through the JDBC driver ('jdbc:sqlite:<path>').
        self.db = db
        self.dbengine = "sqlite"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        path_encoding = sys.getfilesystemencoding() \
            or locale.getdefaultlocale()[1] or 'utf8'
        if uri.startswith('sqlite:memory'):
            dbpath = ':memory:'
        else:
            dbpath = uri.split('://',1)[1]
            if dbpath[0] != '/':
                # relative path: anchor under the working folder
                dbpath = pjoin(
                    self.folder.decode(path_encoding).encode('utf8'), dbpath)
        def connector(dbpath=dbpath,driver_args=driver_args):
            return self.driver.connect(
                self.driver.getConnection('jdbc:sqlite:'+dbpath),
                **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # FIXME http://www.zentus.com/sqlitejdbc/custom_functions.html for UDFs
        self.connection.create_function('web2py_extract', 2,
                                        SQLiteAdapter.web2py_extract)

    def execute(self, a):
        # no REGEXP support registered here; just log and run
        return self.log_execute(a)
2381
class MySQLAdapter(BaseAdapter):
    """Adapter for MySQL via MySQLdb or pymysql."""
    drivers = ('MySQLdb','pymysql')

    maxcharlength = 255
    commit_on_alter_table = True
    support_distributed_transaction = True
    # mapping of DAL field types to MySQL column types
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONGTEXT',
        'json': 'LONGTEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONGBLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'DATETIME',
        'id': 'INT AUTO_INCREMENT NOT NULL',
        'reference': 'INT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONGTEXT',
        'list:string': 'LONGTEXT',
        'list:reference': 'LONGTEXT',
        'big-id': 'BIGINT AUTO_INCREMENT NOT NULL',
        'big-reference': 'BIGINT, INDEX %(index_name)s (%(field_name)s), FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def varquote(self,name):
        # quote identifiers with backticks when needed
        return varquote_aux(name,'`%s`')

    def RANDOM(self):
        return 'RAND()'

    def SUBSTRING(self,field,parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field),
                                        parameters[0], parameters[1])

    def EPOCH(self, first):
        return "UNIX_TIMESTAMP(%s)" % self.expand(first)

    def CONCAT(self, *items):
        return 'CONCAT(%s)' % ','.join(self.expand(x,'string') for x in items)

    def REGEXP(self,first,second):
        return '(%s REGEXP %s)' % (self.expand(first),
                                   self.expand(second,'string'))

    def _drop(self,table,mode):
        # breaks db integrity but without this mysql does not drop table
        return ['SET FOREIGN_KEY_CHECKS=0;','DROP TABLE %s;' % table,
                'SET FOREIGN_KEY_CHECKS=1;']

    def _insert_empty(self, table):
        return 'INSERT INTO %s VALUES (DEFAULT);' % table

    def distributed_transaction_begin(self,key):
        self.execute('XA START;')

    def prepare(self,key):
        self.execute("XA END;")
        self.execute("XA PREPARE;")

    def commit_prepared(self,key):
        # fixed: parameter was misspelled 'ley' (now consistent with
        # prepare/rollback_prepared)
        self.execute("XA COMMIT;")

    def rollback_prepared(self,key):
        self.execute("XA ROLLBACK;")

    # user:password@host:port/db?set_encoding=charset
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the URI into driver connect() arguments and build the
        connector closure; the connection itself happens in reconnect()."""
        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '3306')
        charset = m.group('charset') or 'utf8'
        # copy before mutating: driver_args is a shared mutable default
        # argument and must not be polluted with per-connection credentials
        driver_args = dict(driver_args)
        # user/password already went through credential_decoder above;
        # do not decode them a second time
        driver_args.update(db=db,
                           user=user,
                           passwd=password,
                           host=host,
                           port=port,
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def lastrowid(self,table):
        self.execute('select last_insert_id();')
        return int(self.cursor.fetchone()[0])
2508
class PostgreSQLAdapter(BaseAdapter):
    # Adapter for PostgreSQL via psycopg2 or pg8000.
    drivers = ('psycopg2','pg8000')

    support_distributed_transaction = True
    # mapping of DAL field types to PostgreSQL column types
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def varquote(self,name):
        # quote identifiers with double quotes when needed
        return varquote_aux(name,'"%s"')

    def adapt(self,obj):
        # driver-specific SQL string-literal escaping
        if self.driver_name == 'psycopg2':
            return psycopg2_adapt(obj).getquoted()
        elif self.driver_name == 'pg8000':
            # pg8000: escape % (its paramstyle marker) and single quotes
            return "'%s'" % str(obj).replace("%","%%").replace("'","''")
        else:
            return "'%s'" % str(obj).replace("'","''")

    def sequence_name(self,table):
        # NOTE(review): mixed-case suffix relies on PostgreSQL folding
        # unquoted identifiers to lower case — confirm before changing
        return '%s_id_Seq' % table

    def RANDOM(self):
        return 'RANDOM()'

    def ADD(self, first, second):
        # '+' means concatenation (||) for textual types
        t = first.type
        if t in ('text','string','password', 'json', 'upload','blob'):
            return '(%s || %s)' % (self.expand(first), self.expand(second, t))
        else:
            return '(%s + %s)' % (self.expand(first), self.expand(second, t))

    def distributed_transaction_begin(self,key):
        # postgres needs no explicit start for two-phase commit
        return

    def prepare(self,key):
        self.execute("PREPARE TRANSACTION '%s';" % key)

    def commit_prepared(self,key):
        self.execute("COMMIT PREPARED '%s';" % key)

    def rollback_prepared(self,key):
        self.execute("ROLLBACK PREPARED '%s';" % key)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        # self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        # self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
        #              % (table._tablename, table._fieldname, table._sequence_name))
        self.execute(query)

    # user:password@host:port/db?sslmode=...
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        # Parse the URI into a libpq-style connection string and build
        # the connector closure; connection happens in reconnect().
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        sslmode = m.group('sslmode')
        if sslmode:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s' sslmode='%s'") \
                   % (db, user, host, port, password, sslmode)
        else:
            msg = ("dbname='%s' user='%s' host='%s' "
                   "port=%s password='%s'") \
                   % (db, user, host, port, password)
        # choose driver according uri
        if self.driver:
            self.__version__ = "%s %s" % (self.driver.__name__,
                                          self.driver.__version__)
        else:
            self.__version__ = None
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute("SET standard_conforming_strings=on;")
        self.try_json()

    def lastrowid(self,table):
        # id columns are backed by a sequence; read its current value
        self.execute("select currval('%s')" % table._sequence_name)
        return int(self.cursor.fetchone()[0])

    def try_json(self):
        # check JSON data type support
        # (to be added to after_connection)
        # NOTE(review): pg8000/zxJDBC compare version strings
        # lexicographically — works for 9.x but is fragile
        if self.driver_name == "pg8000":
            supports_json = self.connection.server_version >= "9.2.0"
        elif (self.driver_name == "psycopg2") and \
             (self.driver.__version__ >= "2.0.12"):
            supports_json = self.connection.server_version >= 90200
        elif self.driver_name == "zxJDBC":
            supports_json = self.connection.dbversion >= "9.2.0"
        else: supports_json = None
        if supports_json: self.types["json"] = "JSON"
        else: LOGGER.debug("Your database version does not support the JSON data type (using TEXT instead)")

    def LIKE(self,first,second):
        # non-text columns must be cast before LIKE
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s LIKE %s)' % args

    def ILIKE(self,first,second):
        # case-insensitive LIKE; non-text columns fall back to plain LIKE
        args = (self.expand(first), self.expand(second,'string'))
        if not first.type in ('string', 'text', 'json'):
            return '(CAST(%s AS CHAR(%s)) LIKE %s)' % (args[0], first.length, args[1])
        else:
            return '(%s ILIKE %s)' % args

    def REGEXP(self,first,second):
        return '(%s ~ %s)' % (self.expand(first),
                              self.expand(second,'string'))

    def STARTSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand(second+'%','string'))

    def ENDSWITH(self,first,second):
        return '(%s ILIKE %s)' % (self.expand(first),
                                  self.expand('%'+second,'string'))

    # GIS functions

    def ST_ASGEOJSON(self, first, second):
        """
        http://postgis.org/docs/ST_AsGeoJSON.html
        """
        return 'ST_AsGeoJSON(%s,%s,%s,%s)' %(second['version'],
            self.expand(first), second['precision'], second['options'])

    def ST_ASTEXT(self, first):
        """
        http://postgis.org/docs/ST_AsText.html
        """
        return 'ST_AsText(%s)' %(self.expand(first))

    def ST_X(self, first):
        """
        http://postgis.org/docs/ST_X.html
        """
        return 'ST_X(%s)' %(self.expand(first))

    def ST_Y(self, first):
        """
        http://postgis.org/docs/ST_Y.html
        """
        return 'ST_Y(%s)' %(self.expand(first))

    def ST_CONTAINS(self, first, second):
        """
        http://postgis.org/docs/ST_Contains.html
        """
        return 'ST_Contains(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        """
        http://postgis.org/docs/ST_Distance.html
        """
        return 'ST_Distance(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        """
        http://postgis.org/docs/ST_Equals.html
        """
        return 'ST_Equals(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        """
        http://postgis.org/docs/ST_Intersects.html
        """
        return 'ST_Intersects(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        """
        http://postgis.org/docs/ST_Overlaps.html
        """
        return 'ST_Overlaps(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_SIMPLIFY(self, first, second):
        """
        http://postgis.org/docs/ST_Simplify.html
        """
        return 'ST_Simplify(%s,%s)' %(self.expand(first), self.expand(second, 'double'))

    def ST_TOUCHES(self, first, second):
        """
        http://postgis.org/docs/ST_Touches.html
        """
        return 'ST_Touches(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        """
        http://postgis.org/docs/ST_Within.html
        """
        return 'ST_Within(%s,%s)' %(self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        # geometry/geography values become PostGIS FromText() literals;
        # everything else defers to BaseAdapter
        field_is_type = fieldtype.startswith
        if field_is_type('geo'):
            srid = 4326 # postGIS default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            parms = parms.split(',')
            if len(parms) >= 2:
                schema, srid = parms[:2]
            if field_is_type('geometry'):
                value = "ST_GeomFromText('%s',%s)" %(obj, srid)
            elif field_is_type('geography'):
                value = "ST_GeogFromText('SRID=%s;%s')" %(srid, obj)
#             else:
#                 raise SyntaxError('Invalid field type %s' %fieldtype)
            return value
        return BaseAdapter.represent(self, obj, fieldtype)
2776
class NewPostgreSQLAdapter(PostgreSQLAdapter):
    """Postgres adapter variant that stores list: types as native arrays
    (BIGINT[]/TEXT[]) instead of bar-encoded TEXT."""
    drivers = ('psycopg2','pg8000')

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BYTEA',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'SERIAL PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BIGINT[]',
        'list:string': 'TEXT[]',
        'list:reference': 'BIGINT[]',
        'geometry': 'GEOMETRY',
        'geography': 'GEOGRAPHY',
        'big-id': 'BIGSERIAL PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def parse_list_integers(self, value, field_type):
        # native arrays arrive already decoded by the driver
        return value

    def parse_list_references(self, value, field_type):
        # wrap each element of the native array as a Reference
        referee_type = field_type[5:]
        return [self.parse_reference(item, referee_type) for item in value]

    def parse_list_strings(self, value, field_type):
        # native arrays arrive already decoded by the driver
        return value

    def represent(self, obj, fieldtype):
        """Render list: values as ARRAY[...] literals; defer otherwise."""
        if fieldtype.startswith('list:'):
            if not obj:
                items = []
            elif isinstance(obj, (list, tuple)):
                items = list(obj)
            else:
                items = [obj]
            if fieldtype.startswith('list:string'):
                items = map(str, items)
            else:
                items = map(int, items)
            return 'ARRAY[%s]' % ','.join(repr(item) for item in items)
        return BaseAdapter.represent(self, obj, fieldtype)
2829
class JDBCPostgreSQLAdapter(PostgreSQLAdapter):
    # Jython adapter: connects to PostgreSQL through the zxJDBC bridge.
    drivers = ('zxJDBC',)

    # user:password@host:port/db (no sslmode support over JDBC)
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None ):
        # Parse the URI into a JDBC URL + credentials tuple and build
        # the connector closure; connection happens in reconnect().
        self.db = db
        self.dbengine = "postgres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = m.group('port') or '5432'
        msg = ('jdbc:postgresql://%s:%s/%s' % (host, port, db), user, password)
        def connector(msg=msg,driver_args=driver_args):
            return self.driver.connect(*msg,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        self.connection.set_client_encoding('UTF8')
        self.execute('BEGIN;')
        self.execute("SET CLIENT_ENCODING TO 'UNICODE';")
        self.try_json()
2876
class OracleAdapter(BaseAdapter):
    """Adapter for Oracle via the cx_Oracle driver.

    Oracle has no auto-increment column type, so each table gets a
    sequence plus a BEFORE INSERT trigger (see
    create_sequence_and_triggers).  Inline ``:CLOB('...')`` markers emitted
    by represent_exceptions are rewritten into bind variables by execute(),
    since Oracle limits inline string literals.
    """

    drivers = ('cx_Oracle',)

    commit_on_alter_table = False
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR2(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR2(%(length)s)',
        'blob': 'CLOB',
        'upload': 'VARCHAR2(%(length)s)',
        'integer': 'INT',
        'bigint': 'NUMBER',
        'float': 'FLOAT',
        'double': 'BINARY_DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATE',
        'id': 'NUMBER PRIMARY KEY',
        'reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'NUMBER PRIMARY KEY',
        'big-reference': 'NUMBER, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self, tablename):
        """Name of the id-generating sequence for *tablename*."""
        return '%s_sequence' % tablename

    def trigger_name(self, tablename):
        """Name of the BEFORE INSERT trigger for *tablename*."""
        return '%s_trigger' % tablename

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'dbms_random.value'

    def NOT_NULL(self, default, field_type):
        # Oracle requires DEFAULT before NOT NULL in column definitions.
        return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)

    def _drop(self, table, mode):
        # The companion sequence must be dropped along with the table.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP SEQUENCE %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Emulate LIMIT/OFFSET with nested ROWNUM subqueries (pre-12c
        Oracle has no native OFFSET/FETCH)."""
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return 'SELECT %s %s FROM (SELECT w_tmp.*, ROWNUM w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNUM<=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def constraint_name(self, tablename, fieldname):
        # Oracle identifiers are limited to 30 characters; shorten when needed.
        constraint_name = BaseAdapter.constraint_name(self, tablename, fieldname)
        if len(constraint_name) > 30:
            constraint_name = '%s_%s__constraint' % (tablename[:10], fieldname[:7])
        return constraint_name

    def represent_exceptions(self, obj, fieldtype):
        """Oracle-specific literal rendering; returns None for types the
        base adapter should handle."""
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            # ":CLOB('...')" is a placeholder later turned into a bind
            # variable by execute() via the oracle_fix regex.
            return ":CLOB('%s')" % obj
        elif fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T', ' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10] + ' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','yyyy-mm-dd hh24:mi:ss')" % obj
        return None

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Set up a cx_Oracle connector.  The URI remainder after
        ``oracle://`` is passed verbatim to cx_Oracle (TNS name or EZ
        connect string)."""
        self.db = db
        self.dbengine = "oracle"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        # Allow the connection to be shared across threads unless the caller
        # explicitly disabled it.
        if not 'threaded' in driver_args:
            driver_args['threaded'] = True
        def connector(uri=ruri, driver_args=driver_args):
            return self.driver.connect(uri, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # Fix session date formats so string <-> date round-trips are stable.
        self.execute("ALTER SESSION SET NLS_DATE_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")
        self.execute("ALTER SESSION SET NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS';")

    # Finds the next ":CLOB('...')" marker that is outside quoted strings.
    oracle_fix = re.compile("[^']*('[^']*'[^']*)*\:(?P<clob>CLOB\('([^']+|'')*'\))")

    def execute(self, command, args=None):
        """Execute *command*, first replacing every inline :CLOB('...')
        marker with a numbered bind variable and collecting the CLOB
        payloads into *args*.  A trailing semicolon is stripped (cx_Oracle
        rejects it)."""
        args = args or []
        i = 1
        while True:
            m = self.oracle_fix.match(command)
            if not m:
                break
            command = command[:m.start('clob')] + str(i) + command[m.end('clob'):]
            # Strip the CLOB('...') wrapper and undo SQL quote escaping.
            args.append(m.group('clob')[6:-2].replace("''", "'"))
            i += 1
        if command[-1:] == ';':
            command = command[:-1]
        return self.log_execute(command, args)

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table plus its id sequence and a trigger that both
        assigns ids and keeps the sequence in sync with explicit ids."""
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('CREATE SEQUENCE %s START WITH 1 INCREMENT BY 1 NOMAXVALUE MINVALUE -1;' % sequence_name)
        self.execute("""
            CREATE OR REPLACE TRIGGER %(trigger_name)s BEFORE INSERT ON %(tablename)s FOR EACH ROW
            DECLARE
                curr_val NUMBER;
                diff_val NUMBER;
                PRAGMA autonomous_transaction;
            BEGIN
                IF :NEW.id IS NOT NULL THEN
                    EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                    diff_val := :NEW.id - curr_val - 1;
                    IF diff_val != 0 THEN
                      EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by '|| diff_val;
                      EXECUTE IMMEDIATE 'SELECT %(sequence_name)s.nextval FROM dual' INTO curr_val;
                      EXECUTE IMMEDIATE 'alter sequence %(sequence_name)s increment by 1';
                    END IF;
                END IF;
                SELECT %(sequence_name)s.nextval INTO :NEW.id FROM DUAL;
            END;
        """ % dict(trigger_name=trigger_name, tablename=tablename, sequence_name=sequence_name))

    def lastrowid(self, table):
        """Return the id of the last inserted row by reading the table's
        sequence CURRVAL."""
        sequence_name = table._sequence_name
        self.execute('SELECT %s.currval FROM dual;' % sequence_name)
        return int(self.cursor.fetchone()[0])

    #def parse_value(self, value, field_type, blob_decode=True):
    #    if blob_decode and isinstance(value, cx_Oracle.LOB):
    #        try:
    #            value = value.read()
    #        except self.driver.ProgrammingError:
    #            # After a subsequent fetch the LOB value is not valid anymore
    #            pass
    #    return BaseAdapter.parse_value(self, value, field_type, blob_decode)

    def _fetchall(self):
        """Fetch all rows, eagerly reading CLOB columns (cx_Oracle LOB
        handles become invalid after subsequent fetches)."""
        if any(x[1] == cx_Oracle.CLOB for x in self.cursor.description):
            return [tuple([(c.read() if type(c) == cx_Oracle.LOB else c) \
                           for c in r]) for r in self.cursor]
        else:
            return self.cursor.fetchall()
3050
class MSSQLAdapter(BaseAdapter):
    """Adapter for Microsoft SQL Server via pyodbc.

    Supports both DSN-style URIs (``mssql://dsn``) and full credential
    URIs (``mssql://user:pass@host:port/db?arg=value``), plus the SQL
    Server spatial types (geometry/geography).
    """

    drivers = ('pyodbc',)
    T_SEP = 'T'

    types = {
        'boolean': 'BIT',
        'string': 'VARCHAR(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def concat_add(self, tablename):
        # MSSQL adds columns via a separate ALTER TABLE statement.
        return '; ALTER TABLE %s ADD ' % tablename

    def varquote(self, name):
        # T-SQL quotes identifiers with square brackets.
        return varquote_aux(name, '[%s]')

    def EXTRACT(self, field, what):
        return "DATEPART(%s,%s)" % (what, self.expand(field))

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'NEWID()'

    def ALLOW_NULL(self):
        return ' NULL'

    def SUBSTRING(self, field, parameters):
        return 'SUBSTRING(%s,%s,%s)' % (self.expand(field), parameters[0], parameters[1])

    def PRIMARY_KEY(self, key):
        return 'PRIMARY KEY CLUSTERED (%s)' % key

    def AGGREGATE(self, first, what):
        # T-SQL calls the string-length aggregate LEN, not LENGTH.
        if what == 'LENGTH':
            what = 'LEN'
        return "%s(%s)" % (what, self.expand(first))

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Approximate limitby with TOP; only the upper bound is applied in
        SQL (the lower bound is not expressed here).  An ORDER BY that
        follows a GROUP BY is dropped."""
        if limitby:
            (lmin, lmax) = limitby
            sql_s += ' TOP %i' % lmax
        if 'GROUP BY' in sql_o:
            orderfound = sql_o.find('ORDER BY ')
            if orderfound >= 0:
                sql_o = sql_o[:orderfound]
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    TRUE = 1
    FALSE = 0

    REGEX_DSN = re.compile('^(?P<dsn>.+)$')
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?(?P<urlargs>.*))?$')
    REGEX_ARGPATTERN = re.compile('(?P<argkey>[^=]+)=(?P<argvalue>[^&]*)')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Build a pyodbc connection string from either a DSN-only URI
        (no '@' present) or a full user:pass@host/db URI with optional
        ?key=value arguments.  *srid* is the spatial reference id used by
        the GIS helpers."""
        self.db = db
        self.dbengine = "mssql"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://', 1)[1]
        if '@' not in ruri:
            # DSN form: the remainder is an ODBC data source name.
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
            # was cnxn = 'DSN=%s' % dsn
            cnxn = dsn
        else:
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'  # default MSSQL port
            # Parse the optional url name-value arg pairs after the '?'
            # (in the form of arg1=value1&arg2=value2&...)
            # Default values (drivers like FreeTDS insist on uppercase parameter keys)
            argsdict = {'DRIVER': '{SQL Server}'}
            urlargs = m.group('urlargs') or ''
            for argmatch in self.REGEX_ARGPATTERN.finditer(urlargs):
                argsdict[str(argmatch.group('argkey')).upper()] = argmatch.group('argvalue')
            urlargs = ';'.join(['%s=%s' % (ak, av) for (ak, av) in argsdict.iteritems()])
            cnxn = 'SERVER=%s;PORT=%s;DATABASE=%s;UID=%s;PWD=%s;%s' \
                % (host, port, db, user, password, urlargs)
        def connector(cnxn=cnxn, driver_args=driver_args):
            return self.driver.connect(cnxn, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self, table):
        # SCOPE_IDENTITY() is safer than @@IDENTITY (ignores triggers).
        #self.execute('SELECT @@IDENTITY;')
        self.execute('SELECT SCOPE_IDENTITY();')
        return int(self.cursor.fetchone()[0])

    def integrity_error_class(self):
        return pyodbc.IntegrityError

    def rowslice(self, rows, minimum=0, maximum=None):
        """Apply limitby's lower bound client-side (select_limitby only
        emits TOP)."""
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def CONCAT(self, *items):
        return '(%s)' % ' + '.join(self.expand(x, 'string') for x in items)

    # GIS Spatial Extensions

    # No STAsGeoJSON in MSSQL

    def ST_ASTEXT(self, first):
        return '%s.STAsText()' % (self.expand(first))

    def ST_CONTAINS(self, first, second):
        return '%s.STContains(%s)=1' % (self.expand(first), self.expand(second, first.type))

    def ST_DISTANCE(self, first, second):
        return '%s.STDistance(%s)' % (self.expand(first), self.expand(second, first.type))

    def ST_EQUALS(self, first, second):
        return '%s.STEquals(%s)=1' % (self.expand(first), self.expand(second, first.type))

    def ST_INTERSECTS(self, first, second):
        return '%s.STIntersects(%s)=1' % (self.expand(first), self.expand(second, first.type))

    def ST_OVERLAPS(self, first, second):
        return '%s.STOverlaps(%s)=1' % (self.expand(first), self.expand(second, first.type))

    # no STSimplify in MSSQL

    def ST_TOUCHES(self, first, second):
        return '%s.STTouches(%s)=1' % (self.expand(first), self.expand(second, first.type))

    def ST_WITHIN(self, first, second):
        return '%s.STWithin(%s)=1' % (self.expand(first), self.expand(second, first.type))

    def represent(self, obj, fieldtype):
        """Render spatial values as STGeomFromText literals; defer all
        other types to the base adapter.

        NOTE(review): both spatial branches unpack
        ``fieldtype[:-1].split('(')`` into two names, which raises
        ValueError when *fieldtype* contains no '(' (e.g. exactly
        'geography') — confirm expected fieldtype spellings.  Also, when
        a srid is parsed it is set to the full text inside the parens,
        and the second return in the geography branch is unreachable dead
        code (and uses geometry:: rather than geography::).
        """
        field_is_type = fieldtype.startswith
        if field_is_type('geometry'):
            srid = 0  # MS SQL default srid for geometry
            geotype, parms = fieldtype[:-1].split('(')
            if parms:
                srid = parms
            return "geometry::STGeomFromText('%s',%s)" % (obj, srid)
        elif fieldtype == 'geography':
            srid = 4326  # MS SQL default srid for geography
            geotype, parms = fieldtype[:-1].split('(')
            if parms:
                srid = parms
            return "geography::STGeomFromText('%s',%s)" % (obj, srid)
#             else:
#                 raise SyntaxError('Invalid field type %s' %fieldtype)
            return "geometry::STGeomFromText('%s',%s)" % (obj, srid)
        return BaseAdapter.represent(self, obj, fieldtype)
3262
class MSSQL3Adapter(MSSQLAdapter):
    """Experimental support for pagination in MSSQL.

    Uses TOP when the offset is zero and a ROW_NUMBER() OVER (...) window
    subquery otherwise, so limitby can be honored fully in SQL.
    """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Build a paginated SELECT; rows are numbered w_row in an inner
        query and filtered with BETWEEN in the outer one."""
        if limitby:
            (lmin, lmax) = limitby
            if lmin == 0:
                # No offset: a plain TOP is enough.
                sql_s += ' TOP %i' % lmax
                return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)
            lmin += 1  # BETWEEN is inclusive and w_row is 1-based
            # NOTE(review): sql_o_inner is computed but never used below.
            sql_o_inner = sql_o[sql_o.find('ORDER BY ') + 9:]
            sql_g_inner = sql_o[:sql_o.find('ORDER BY ')]
            # Alias each selected column f_0, f_1, ... so the outer query
            # can reference them unambiguously.
            sql_f_outer = ['f_%s' % f for f in range(len(sql_f.split(',')))]
            sql_f_inner = [f for f in sql_f.split(',')]
            sql_f_iproxy = ['%s AS %s' % (o, n) for (o, n) in zip(sql_f_inner, sql_f_outer)]
            sql_f_iproxy = ', '.join(sql_f_iproxy)
            sql_f_oproxy = ', '.join(sql_f_outer)
            return 'SELECT %s %s FROM (SELECT %s ROW_NUMBER() OVER (ORDER BY %s) AS w_row, %s FROM %s%s%s) TMP WHERE w_row BETWEEN %i AND %s;' % (sql_s, sql_f_oproxy, sql_s, sql_f, sql_f_iproxy, sql_t, sql_w, sql_g_inner, lmin, lmax)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        # Pagination is done fully in SQL, so no client-side slicing.
        return rows
3284
class MSSQL2Adapter(MSSQLAdapter):
    """MSSQL adapter variant using Unicode column types (NVARCHAR/NTEXT)
    and N-prefixed string literals."""

    drivers = ('pyodbc',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NTEXT',
        'json': 'NTEXT',
        'password': 'NVARCHAR(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'NVARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NTEXT',
        'list:string': 'NTEXT',
        'list:reference': 'NTEXT',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def represent(self, obj, fieldtype):
        """Delegate to the base representation, then prefix quoted string
        literals with N so MSSQL treats them as Unicode."""
        value = BaseAdapter.represent(self, obj, fieldtype)
        if fieldtype in ('string', 'text', 'json') and value[:1] == "'":
            value = 'N' + value
        return value

    def execute(self, a):
        # Decode the byte string to unicode before execution (Python 2).
        return self.log_execute(a.decode('utf8'))
3324
class SybaseAdapter(MSSQLAdapter):
    """Adapter for Sybase, reusing the MSSQL SQL dialect with Sybase
    column types and the `Sybase` driver.

    URIs take either a DSN-only form (no '@') or
    ``sybase://user:pass@host:port/db``, mirroring MSSQLAdapter.
    """

    drivers = ('Sybase',)

    types = {
        'boolean': 'BIT',
        'string': 'CHAR VARYING(%(length)s)',
        'text': 'TEXT',
        'json': 'TEXT',
        'password': 'CHAR VARYING(%(length)s)',
        'blob': 'IMAGE',
        'upload': 'CHAR VARYING(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATETIME',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'INT IDENTITY PRIMARY KEY',
        'reference': 'INT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'TEXT',
        'list:string': 'TEXT',
        'list:reference': 'TEXT',
        'geometry': 'geometry',
        'geography': 'geography',
        'big-id': 'BIGINT IDENTITY PRIMARY KEY',
        'big-reference': 'BIGINT NULL, CONSTRAINT %(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, srid=4326,
                 after_connection=None):
        """Parse the URI and set up a Sybase connector.

        Raises SyntaxError when the URI is malformed or a required
        component (user, host, database name / DSN) is missing.
        """
        self.db = db
        self.dbengine = "sybase"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.srid = srid
        self.find_or_make_work_folder()
        # ## read: http://bytes.com/groups/python/460325-cx_oracle-utf8
        ruri = uri.split('://', 1)[1]
        if '@' not in ruri:
            # DSN form: the remainder is the data source name itself.
            try:
                m = self.REGEX_DSN.match(ruri)
                if not m:
                    raise SyntaxError(
                        'Parsing uri string(%s) has no result' % self.uri)
                dsn = m.group('dsn')
                if not dsn:
                    raise SyntaxError('DSN required')
            except SyntaxError:
                e = sys.exc_info()[1]
                LOGGER.error('NdGpatch error')
                raise e
        else:
            # BUGFIX: match against ruri (scheme stripped), not the full
            # uri — matching uri made the 'sybase' scheme parse as the
            # user name.  This mirrors MSSQLAdapter.__init__.
            m = self.REGEX_URI.match(ruri)
            if not m:
                raise SyntaxError(
                    "Invalid URI string in DAL: %s" % self.uri)
            user = credential_decoder(m.group('user'))
            if not user:
                raise SyntaxError('User required')
            password = credential_decoder(m.group('password'))
            if not password:
                password = ''
            host = m.group('host')
            if not host:
                raise SyntaxError('Host name required')
            db = m.group('db')
            if not db:
                raise SyntaxError('Database name required')
            port = m.group('port') or '1433'

            dsn = 'sybase:host=%s:%s;dbname=%s' % (host, port, db)

            driver_args.update(user=credential_decoder(user),
                               password=credential_decoder(password))

        def connector(dsn=dsn, driver_args=driver_args):
            return self.driver.connect(dsn, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def integrity_error_class(self):
        return RuntimeError  # FIX THIS
3419
class FireBirdAdapter(BaseAdapter):
    """Adapter for Firebird/Interbase-family servers.

    Auto-increment ids are emulated with a generator plus a BEFORE INSERT
    trigger; several drivers are supported (kinterbasdb, firebirdsql, fdb,
    pyodbc).
    """

    drivers = ('kinterbasdb', 'firebirdsql', 'fdb', 'pyodbc')

    commit_on_alter_table = False
    support_distributed_transaction = True
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'DECIMAL(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INTEGER PRIMARY KEY',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self, tablename):
        """Name of the id generator for *tablename*."""
        return 'genid_%s' % tablename

    def trigger_name(self, tablename):
        """Name of the id-assigning trigger for *tablename*."""
        return 'trg_id_%s' % tablename

    def RANDOM(self):
        return 'RAND()'

    def EPOCH(self, first):
        return "DATEDIFF(second, '1970-01-01 00:00:00', %s)" % self.expand(first)

    def NOT_NULL(self, default, field_type):
        # Firebird requires DEFAULT before NOT NULL.
        return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)

    def SUBSTRING(self, field, parameters):
        return 'SUBSTRING(%s from %s for %s)' % (self.expand(field), parameters[0], parameters[1])

    def LENGTH(self, first):
        return "CHAR_LENGTH(%s)" % self.expand(first)

    def CONTAINS(self, first, second, case_sensitive=False):
        """Firebird's CONTAINING operator is case-insensitive; for list:
        fields the needle is wrapped in '|' delimiters to match whole
        items."""
        if first.type.startswith('list:'):
            second = Expression(None, self.CONCAT('|', Expression(
                        None, self.REPLACE(second, ('|', '||'))), '|'))
        return '(%s CONTAINING %s)' % (self.expand(first),
                                       self.expand(second, 'string'))

    def _drop(self, table, mode):
        # The id generator must be dropped with the table.
        sequence_name = table._sequence_name
        return ['DROP TABLE %s %s;' % (table, mode), 'DROP GENERATOR %s;' % sequence_name]

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Firebird pagination: FIRST <count> SKIP <offset>.
        if limitby:
            (lmin, lmax) = limitby
            sql_s = ' FIRST %i SKIP %i %s' % (lmax - lmin, lmin, sql_s)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        # No TRUNCATE in Firebird: delete all rows and reset the generator.
        return ['DELETE FROM %s;' % table._tablename,
                'SET GENERATOR %s TO 0;' % table._sequence_name]

    # user[:password]@host[:port]/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+?)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the URI into driver keyword arguments (dsn, user,
        password, charset) and set up the connector.  Raises SyntaxError
        for malformed URIs or missing components."""
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        port = int(m.group('port') or 3050)  # default Firebird port
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        charset = m.group('charset') or 'UTF8'
        driver_args.update(dsn='%s/%s:%s' % (host, port, db),
                           user=credential_decoder(user),
                           password=credential_decoder(password),
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        """Create the table, its id generator, and a trigger that fills
        NEW.id from the generator when no explicit id is supplied."""
        tablename = table._tablename
        sequence_name = table._sequence_name
        trigger_name = table._trigger_name
        self.execute(query)
        self.execute('create generator %s;' % sequence_name)
        self.execute('set generator %s to 0;' % sequence_name)
        self.execute('create trigger %s for %s active before insert position 0 as\nbegin\nif(new.id is null) then\nbegin\nnew.id = gen_id(%s, 1);\nend\nend;' % (trigger_name, tablename, sequence_name))

    def lastrowid(self, table):
        """Read the current value of the table's generator (gen_id with a
        0 increment does not advance it)."""
        sequence_name = table._sequence_name
        self.execute('SELECT gen_id(%s, 0) FROM rdb$database' % sequence_name)
        return int(self.cursor.fetchone()[0])
3548
class FireBirdEmbeddedAdapter(FireBirdAdapter):
    """Firebird embedded variant: the URI carries a filesystem path to the
    database instead of a host/port."""

    drivers = ('kinterbasdb', 'firebirdsql', 'fdb', 'pyodbc')

    # user[:password]@/path/to/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<path>[^\?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the embedded-database URI into driver keyword arguments
        (empty host, database path, credentials, charset) and set up the
        connector.  Raises SyntaxError for malformed URIs."""
        self.db = db
        self.dbengine = "firebird"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        pathdb = m.group('path')
        if not pathdb:
            raise SyntaxError('Path required')
        charset = m.group('charset')
        if not charset:
            charset = 'UTF8'
        host = ''  # embedded: no network host
        driver_args.update(host=host,
                           database=pathdb,
                           user=credential_decoder(user),
                           password=credential_decoder(password),
                           charset=charset)

        def connector(driver_args=driver_args):
            return self.driver.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()
3595
class InformixAdapter(BaseAdapter):
    """Adapter for IBM Informix (9+) via the informixdb driver."""

    drivers = ('informixdb',)

    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'BLOB SUB_TYPE 1',
        'json': 'BLOB SUB_TYPE 1',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB SUB_TYPE 0',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INTEGER',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'CHAR(8)',
        'datetime': 'DATETIME',
        'id': 'SERIAL',
        'reference': 'INTEGER REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'BLOB SUB_TYPE 1',
        'list:string': 'BLOB SUB_TYPE 1',
        'list:reference': 'BLOB SUB_TYPE 1',
        'big-id': 'BIGSERIAL',
        'big-reference': 'BIGINT REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': 'REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s CONSTRAINT FK_%(table_name)s_%(field_name)s',
        'reference TFK': 'FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s CONSTRAINT TFK_%(table_name)s_%(field_name)s',
        }

    def RANDOM(self):
        return 'Random()'

    def NOT_NULL(self, default, field_type):
        return 'DEFAULT %s NOT NULL' % self.represent(default, field_type)

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Apply limitby via SKIP/FIRST, gated on the connected server's
        major version (SKIP needs 10.0+, FIRST needs 9.0+)."""
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            dbms_version = int(self.connection.dbms_version.split('.')[0])
            if lmin and (dbms_version >= 10):
                # Requires Informix 10.0+
                sql_s += ' SKIP %d' % (lmin, )
            if fetch_amt and (dbms_version >= 9):
                # Requires Informix 9.0+
                sql_s += ' FIRST %d' % (fetch_amt, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        """Informix-specific date/datetime literals; None means 'use the
        base adapter's representation'."""
        if fieldtype == 'date':
            if isinstance(obj, (datetime.date, datetime.datetime)):
                obj = obj.isoformat()[:10]
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d')" % obj
        elif fieldtype == 'datetime':
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T', ' ')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10] + ' 00:00:00'
            else:
                obj = str(obj)
            return "to_date('%s','%%Y-%%m-%%d %%H:%%M:%%S')" % obj
        return None

    # user[:password]@host[:port]/db
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>.+)$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Parse the URI into an Informix ``db@host`` DSN plus credentials
        and set up the connector.  Raises SyntaxError for malformed URIs."""
        self.db = db
        self.dbengine = "informix"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        # NOTE(review): credential_decoder is applied a second time here to
        # already-decoded values; harmless with the IDENTITY default, but
        # double-decodes with a custom decoder — confirm intent.
        user = credential_decoder(user)
        password = credential_decoder(password)
        dsn = '%s@%s' % (db, host)
        driver_args.update(user=user, password=password, autocommit=True)
        def connector(dsn=dsn, driver_args=driver_args):
            return self.driver.connect(dsn, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self, command):
        # informixdb rejects trailing semicolons.
        if command[-1:] == ';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self, table):
        # sqlerrd[1] holds the SERIAL value of the last insert.
        return self.cursor.sqlerrd[1]

    def integrity_error_class(self):
        return informixdb.IntegrityError
3712
class InformixSEAdapter(InformixAdapter):
    """Adapter for Informix Standard Engine (work in progress).

    Overrides the parent so that limitby is never expressed in SQL;
    instead the fetched rows are sliced client-side in rowslice.
    """

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        """Build a plain SELECT; *limitby* is intentionally ignored here."""
        return 'SELECT %s %s FROM %s%s%s;' % (
            sql_s, sql_f, sql_t, sql_w, sql_o)

    def rowslice(self, rows, minimum=0, maximum=None):
        """Emulate limitby by slicing the already-fetched rows."""
        return rows[minimum:] if maximum is None else rows[minimum:maximum]
3724
class DB2Adapter(BaseAdapter):
    """Adapter for IBM DB2, connected through a raw pyodbc connection
    string (everything in the DAL URI after 'db2://')."""

    drivers = ('pyodbc',)

    # DAL field type -> DB2 column type, used when generating DDL.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY PRIMARY KEY NOT NULL',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s',
        }

    def LEFT_JOIN(self):
        # DB2 spells left joins with the OUTER keyword.
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RAND()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # NOTE(review): only the upper bound is applied (FETCH FIRST lmax);
        # lmin is ignored, so OFFSET-style paging is not implemented here —
        # rowslice() below is the client-side fallback.
        if limitby:
            (lmin, lmax) = limitby
            sql_o += ' FETCH FIRST %i ROWS ONLY' % lmax
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def represent_exceptions(self, obj, fieldtype):
        # DB2-specific SQL literals; returning None defers to the generic
        # representation.
        if fieldtype == 'blob':
            obj = base64.b64encode(str(obj))
            return "BLOB('%s')" % obj
        elif fieldtype == 'datetime':
            # timestamp literal in the form YYYY-MM-DD-HH.MM.SS
            if isinstance(obj, datetime.datetime):
                obj = obj.isoformat()[:19].replace('T','-').replace(':','.')
            elif isinstance(obj, datetime.date):
                obj = obj.isoformat()[:10]+'-00.00.00'
            return "'%s'" % obj
        return None

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "db2"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        # everything after 'db2://' is handed to pyodbc verbatim
        ruri = uri.split('://', 1)[1]
        def connector(cnxn=ruri,driver_args=driver_args):
            return self.driver.connect(cnxn,**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def execute(self,command):
        # strip a trailing semicolon before execution
        if command[-1:]==';':
            command = command[:-1]
        return self.log_execute(command)

    def lastrowid(self,table):
        # IDENTITY_VAL_LOCAL() reports the identity value generated by the
        # most recent INSERT on this connection.
        self.execute('SELECT DISTINCT IDENTITY_VAL_LOCAL() FROM %s;' % table)
        return int(self.cursor.fetchone()[0])

    def rowslice(self,rows,minimum=0,maximum=None):
        # client-side slice of an already-fetched row list
        if maximum is None:
            return rows[minimum:]
        return rows[minimum:maximum]
3810
class TeradataAdapter(BaseAdapter):
    """Adapter for Teradata: the part of the DAL URI after 'teradata://'
    is forwarded to pyodbc verbatim as an ODBC connection string."""

    drivers = ('pyodbc',)

    # DAL field type -> Teradata column type.
    # Constraint syntax is Teradata-specific: ON DELETE is not supported,
    # so reference types carry no FK action clauses.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'REAL',
        'double': 'DOUBLE',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
        'reference': 'INT',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'BIGINT GENERATED ALWAYS AS IDENTITY', # Teradata Specific
        'big-reference': 'BIGINT',
        'reference FK': ' REFERENCES %(foreign_key)s',
        'reference TFK': ' FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s)',
        }

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Store connection parameters and build the connector closure."""
        self.db = db
        self.dbengine = "teradata"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        odbc_string = uri.split('://', 1)[1]
        def connector(cnxn=odbc_string, driver_args=driver_args):
            return self.driver.connect(cnxn, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Similar to MSSQL: TOP caps the row count but no offset/range can
        # be expressed, so the lower bound is dropped.
        if limitby:
            (_, upper) = limitby
            sql_s += ' TOP %i' % upper
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def _truncate(self, table, mode=''):
        # Teradata-specific DELETE ... ALL form (no TRUNCATE statement).
        return ['DELETE FROM %s ALL;' % table._tablename]
INGRES_SEQNAME='ii***lineitemsequence' # NOTE invalid database object name
                                       # (ANSI-SQL wants this form of name
                                       # to be a delimited identifier)

class IngresAdapter(BaseAdapter):
    """Adapter for Ingres via pyodbc.

    Fix vs. previous revision: ``__init__`` hard-wired ``self._driver =
    pyodbc`` from a module-level global, bypassing ``find_driver`` and
    raising NameError whenever the optional pyodbc import had failed;
    that assignment is removed and ``integrity_error_class`` now reports
    the driver resolved by ``find_driver`` (``self.driver``).
    """
    drivers = ('pyodbc',)

    # DAL field type -> Ingres column type.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'CLOB',
        'json': 'CLOB',
        'password': 'VARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'int not null unique with default next value for %s' % INGRES_SEQNAME,
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'CLOB',
        'list:string': 'CLOB',
        'list:reference': 'CLOB',
        'big-id': 'bigint not null unique with default next value for %s' % INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }

    def LEFT_JOIN(self):
        return 'LEFT OUTER JOIN'

    def RANDOM(self):
        return 'RANDOM()'

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        if limitby:
            (lmin, lmax) = limitby
            fetch_amt = lmax - lmin
            if fetch_amt:
                sql_s += ' FIRST %d ' % (fetch_amt, )
            if lmin:
                # Requires Ingres 9.2+
                sql_o += ' OFFSET %d' % (lmin, )
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "ingres"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        connstr = uri.split(':', 1)[1]
        # Simple URI processing
        connstr = connstr.lstrip()
        while connstr.startswith('/'):
            connstr = connstr[1:]
        if '=' in connstr:
            # Assume we have a regular ODBC connection string and just use it
            ruri = connstr
        else:
            # Assume only (local) dbname is passed in with OS auth
            database_name = connstr
            default_driver_name = 'Ingres'
            vnode = '(local)'
            ruri = 'Driver={%s};Server=%s;Database=%s' % (default_driver_name, vnode, database_name)
        def connector(cnxn=ruri, driver_args=driver_args):
            return self.driver.connect(cnxn, **driver_args)

        self.connector = connector

        # TODO if version is >= 10, set types['id'] to Identity column, see http://community.actian.com/wiki/Using_Ingres_Identity_Columns
        if do_connect: self.reconnect()

    def create_sequence_and_triggers(self, query, table, **args):
        # post create table auto inc code (if needed)
        # modify table to btree for performance....
        # Older Ingres releases could use rule/trigger like Oracle above.
        # NOTE(review): the guard checks table._primarykey but iterates
        # table.primarykey — confirm both resolve to the same key list.
        if hasattr(table,'_primarykey'):
            modify_tbl_sql = 'modify %s to btree unique on %s' % \
                (table._tablename,
                 ', '.join(["'%s'" % x for x in table.primarykey]))
            self.execute(modify_tbl_sql)
        else:
            tmp_seqname='%s_iisq' % table._tablename
            query=query.replace(INGRES_SEQNAME, tmp_seqname)
            self.execute('create sequence %s' % tmp_seqname)
            self.execute(query)
            self.execute('modify %s to btree unique on %s' % (table._tablename, 'id'))

    def lastrowid(self,table):
        tmp_seqname='%s_iisq' % table
        self.execute('select current value for %s' % tmp_seqname)
        return int(self.cursor.fetchone()[0]) # don't really need int type cast here...

    def integrity_error_class(self):
        # use the driver resolved by find_driver rather than a module global
        return self.driver.IntegrityError
3987
class IngresUnicodeAdapter(IngresAdapter):
    """IngresAdapter variant that maps string-like DAL types to Unicode
    column types (NVARCHAR/NCLOB); all behaviour is inherited."""

    drivers = ('pyodbc',)

    # DAL field type -> Ingres Unicode column type.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'NVARCHAR(%(length)s)',
        'text': 'NCLOB',
        'json': 'NCLOB',
        'password': 'NVARCHAR(%(length)s)', ## Not sure what this contains utf8 or nvarchar. Or even bytes?
        'blob': 'BLOB',
        'upload': 'VARCHAR(%(length)s)', ## FIXME utf8 or nvarchar... or blob? what is this type?
        'integer': 'INTEGER4', # or int8...
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'FLOAT8',
        'decimal': 'NUMERIC(%(precision)s,%(scale)s)',
        'date': 'ANSIDATE',
        'time': 'TIME WITHOUT TIME ZONE',
        'datetime': 'TIMESTAMP WITHOUT TIME ZONE',
        'id': 'INTEGER4 not null unique with default next value for %s'% INGRES_SEQNAME,
        'reference': 'INTEGER4, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'NCLOB',
        'list:string': 'NCLOB',
        'list:reference': 'NCLOB',
        'big-id': 'BIGINT not null unique with default next value for %s'% INGRES_SEQNAME,
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference FK': ', CONSTRAINT FK_%(constraint_name)s FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'reference TFK': ' CONSTRAINT FK_%(foreign_table)s_PK FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_table)s (%(foreign_key)s) ON DELETE %(on_delete_action)s', ## FIXME TODO
        }
4019
class SAPDBAdapter(BaseAdapter):
    """Adapter for SAP DB / MaxDB via the sapdb driver (experimental)."""

    drivers = ('sapdb',)

    support_distributed_transaction = False
    # DAL field type -> MaxDB column type.
    types = {
        'boolean': 'CHAR(1)',
        'string': 'VARCHAR(%(length)s)',
        'text': 'LONG',
        'json': 'LONG',
        'password': 'VARCHAR(%(length)s)',
        'blob': 'LONG',
        'upload': 'VARCHAR(%(length)s)',
        'integer': 'INT',
        'bigint': 'BIGINT',
        'float': 'FLOAT',
        'double': 'DOUBLE PRECISION',
        'decimal': 'FIXED(%(precision)s,%(scale)s)',
        'date': 'DATE',
        'time': 'TIME',
        'datetime': 'TIMESTAMP',
        'id': 'INT PRIMARY KEY',
        'reference': 'INT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        'list:integer': 'LONG',
        'list:string': 'LONG',
        'list:reference': 'LONG',
        'big-id': 'BIGINT PRIMARY KEY',
        'big-reference': 'BIGINT, FOREIGN KEY (%(field_name)s) REFERENCES %(foreign_key)s ON DELETE %(on_delete_action)s',
        }

    def sequence_name(self,table):
        # per-table sequence that feeds the id column
        return '%s_id_Seq' % table

    def select_limitby(self, sql_s, sql_f, sql_t, sql_w, sql_o, limitby):
        # Emulates LIMIT/OFFSET with a nested SELECT and MaxDB's ROWNO
        # pseudo-column.
        # NOTE(review): the inner 'WHERE ROWNO=%i' (filled with lmax) looks
        # suspicious — a range ('ROWNO<=') would be expected — and sql_t /
        # sql_o are spliced in twice; verify paging against a live MaxDB.
        if limitby:
            (lmin, lmax) = limitby
            if len(sql_w) > 1:
                sql_w_row = sql_w + ' AND w_row > %i' % lmin
            else:
                sql_w_row = 'WHERE w_row > %i' % lmin
            return '%s %s FROM (SELECT w_tmp.*, ROWNO w_row FROM (SELECT %s FROM %s%s%s) w_tmp WHERE ROWNO=%i) %s %s %s;' % (sql_s, sql_f, sql_f, sql_t, sql_w, sql_o, lmax, sql_t, sql_w_row, sql_o)
        return 'SELECT %s %s FROM %s%s%s;' % (sql_s, sql_f, sql_t, sql_w, sql_o)

    def create_sequence_and_triggers(self, query, table, **args):
        # following lines should only be executed if table._sequence_name does not exist
        self.execute('CREATE SEQUENCE %s;' % table._sequence_name)
        self.execute("ALTER TABLE %s ALTER COLUMN %s SET DEFAULT NEXTVAL('%s');" \
            % (table._tablename, table._id.name, table._sequence_name))
        self.execute(query)

    # sapdb://user:password@host[:port]/db[?sslmode=...]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?/(?P<db>[^\?]+)(\?sslmode=(?P<sslmode>.+))?$')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "sapdb"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args,uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://',1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in DAL")
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        def connector(user=user, password=password, database=db,
                      host=host, driver_args=driver_args):
            return self.driver.Connection(user, password, database,
                                          host, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def lastrowid(self,table):
        # NOTE(review): NEXTVAL advances the sequence; CURRVAL would be the
        # expected way to read the value of the last INSERT — confirm.
        self.execute("select %s.NEXTVAL from dual" % table._sequence_name)
        return int(self.cursor.fetchone()[0])
4110
class CubridAdapter(MySQLAdapter):
    """Adapter for CUBRID (MySQL-compatible dialect) via cubriddb.

    Fix vs. previous revision: the user name was passed through
    ``credential_decoder`` a second time after already being decoded
    (and a dead double-decoded ``passwd`` local was computed), corrupting
    credentials for any non-identity decoder; each credential is now
    decoded exactly once.
    """
    drivers = ('cubriddb',)

    # cubrid://user:password@host[:port]/db[?set_encoding=charset]
    REGEX_URI = re.compile('^(?P<user>[^:@]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:/]+)(\:(?P<port>[0-9]+))?/(?P<db>[^?]+)(\?set_encoding=(?P<charset>\w+))?$')

    def __init__(self, db, uri, pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        self.db = db
        self.dbengine = "cubrid"
        self.uri = uri
        if do_connect: self.find_driver(adapter_args, uri)
        self.pool_size = pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.find_or_make_work_folder()
        ruri = uri.split('://', 1)[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError(
                "Invalid URI string in DAL: %s" % self.uri)
        user = credential_decoder(m.group('user'))
        if not user:
            raise SyntaxError('User required')
        password = credential_decoder(m.group('password'))
        if not password:
            password = ''
        host = m.group('host')
        if not host:
            raise SyntaxError('Host name required')
        db = m.group('db')
        if not db:
            raise SyntaxError('Database name required')
        port = int(m.group('port') or '30000')
        # parsed but not currently forwarded to the driver
        charset = m.group('charset') or 'utf8'
        def connector(host=host, port=port, db=db,
                      user=user, passwd=password, driver_args=driver_args):
            return self.driver.connect(host, port, db, user, passwd, **driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        # behave like MySQL: enforce FK checks, disable backslash escapes
        self.execute('SET FOREIGN_KEY_CHECKS=1;')
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")
4158
######## GAE MySQL ##########

class DatabaseStoredFile:
    """File-like object that stores web2py's .table migration metadata in
    the database itself (table 'web2py_filesystem') instead of on the local
    filesystem. Only MySQL and Postgres backends are supported."""

    # class-level flag: the web2py_filesystem table is created at most once
    # per process
    web2py_filesystem = False

    def escape(self,obj):
        # delegate SQL escaping to the adapter
        return self.db._adapter.escape(obj)

    def __init__(self,db,filename,mode):
        # NOTE(review): filename is interpolated into SQL unescaped below;
        # acceptable only because these are internally generated .table
        # paths, never user input.
        if not db._adapter.dbengine in ('mysql', 'postgres'):
            raise RuntimeError("only MySQL/Postgres can store metadata .table files in database for now")
        self.db = db
        self.filename = filename
        self.mode = mode
        if not self.web2py_filesystem:
            if db._adapter.dbengine == 'mysql':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content LONGTEXT, PRIMARY KEY(path) ) ENGINE=InnoDB;"
            elif db._adapter.dbengine == 'postgres':
                sql = "CREATE TABLE IF NOT EXISTS web2py_filesystem (path VARCHAR(255), content TEXT, PRIMARY KEY(path));"
            self.db.executesql(sql)
            DatabaseStoredFile.web2py_filesystem = True
        # p is the current read offset into the buffered content
        self.p=0
        self.data = ''
        if mode in ('r','rw','a'):
            query = "SELECT content FROM web2py_filesystem WHERE path='%s'" \
                % filename
            rows = self.db.executesql(query)
            if rows:
                self.data = rows[0][0]
            elif exists(filename):
                # fall back to a real file on disk, if one exists
                datafile = open(filename, 'r')
                try:
                    self.data = datafile.read()
                finally:
                    datafile.close()
            elif mode in ('r','rw'):
                raise RuntimeError("File %s does not exist" % filename)

    def read(self, bytes):
        # read up to *bytes* characters from the current offset
        data = self.data[self.p:self.p+bytes]
        self.p += len(data)
        return data

    def readline(self):
        # return the next line including its trailing newline, if any
        i = self.data.find('\n',self.p)+1
        if i>0:
            data, self.p = self.data[self.p:i], i
        else:
            data, self.p = self.data[self.p:], len(self.data)
        return data

    def write(self,data):
        # append-only buffer; flushed to the DB by close_connection()
        self.data += data

    def close_connection(self):
        # upsert-by-delete: remove any previous row for this path, insert
        # the buffered content, then commit and detach from the db
        if self.db is not None:
            self.db.executesql(
                "DELETE FROM web2py_filesystem WHERE path='%s'" % self.filename)
            query = "INSERT INTO web2py_filesystem(path,content) VALUES ('%s','%s')"\
                % (self.filename, self.data.replace("'","''"))
            self.db.executesql(query)
            self.db.commit()
            self.db = None

    def close(self):
        self.close_connection()

    @staticmethod
    def exists(db, filename):
        # true if the file exists on disk or in web2py_filesystem
        if exists(filename):
            return True
        query = "SELECT path FROM web2py_filesystem WHERE path='%s'" % filename
        if db.executesql(query):
            return True
        return False
4236
class UseDatabaseStoredFile:
    """Mixin that redirects an adapter's .table metadata file operations
    to DatabaseStoredFile (i.e. stores them in the database)."""

    def file_exists(self, filename):
        return DatabaseStoredFile.exists(self.db, filename)

    def file_open(self, filename, mode='rb', lock=True):
        # lock is accepted for interface compatibility; DatabaseStoredFile
        # does no locking
        return DatabaseStoredFile(self.db, filename, mode)

    def file_close(self, fileobj):
        fileobj.close_connection()

    def file_delete(self, filename):
        database = self.db
        database.executesql(
            "DELETE FROM web2py_filesystem WHERE path='%s'" % filename)
        database.commit()
4253
class GoogleSQLAdapter(UseDatabaseStoredFile,MySQLAdapter):
    """Adapter for Google Cloud SQL (MySQL dialect) on App Engine.

    .table metadata is kept in the database via UseDatabaseStoredFile;
    the connection goes through the App Engine `rdbms` client module.
    """
    uploads_in_blob = True

    # google:sql://instance/database
    REGEX_URI = re.compile('^(?P<instance>.*)/(?P<db>.*)$')

    def __init__(self, db, uri='google:sql://realm:domain/database',
                 pool_size=0, folder=None, db_codec='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):

        self.db = db
        self.dbengine = "mysql"
        self.uri = uri
        self.pool_size = pool_size
        self.db_codec = db_codec
        self._after_connection = after_connection
        # work folder is relocated under $HOME (presumably to satisfy the
        # GAE sandbox — TODO confirm)
        self.folder = folder or pjoin('$HOME',THREAD_LOCAL.folder.split(
                os.sep+'applications'+os.sep,1)[1])
        ruri = uri.split("://")[1]
        m = self.REGEX_URI.match(ruri)
        if not m:
            raise SyntaxError("Invalid URI string in SQLDB: %s" % self.uri)
        instance = credential_decoder(m.group('instance'))
        self.dbstring = db = credential_decoder(m.group('db'))
        driver_args['instance'] = instance
        if not 'charset' in driver_args:
            driver_args['charset'] = 'utf8'
        # createdb (adapter_args, default True): create and USE the
        # database in after_connection() instead of at connect time
        self.createdb = createdb = adapter_args.get('createdb',True)
        if not createdb:
            driver_args['database'] = db
        def connector(driver_args=driver_args):
            return rdbms.connect(**driver_args)
        self.connector = connector
        if do_connect: self.reconnect()

    def after_connection(self):
        if self.createdb:
            # self.execute('DROP DATABASE %s' % self.dbstring)
            self.execute('CREATE DATABASE IF NOT EXISTS %s' % self.dbstring)
            self.execute('USE %s' % self.dbstring)
        self.execute("SET FOREIGN_KEY_CHECKS=1;")
        self.execute("SET sql_mode='NO_BACKSLASH_ESCAPES';")

    def execute(self, command, *a, **b):
        # NOTE(review): .decode('utf8') assumes a Python 2 byte string;
        # under Python 3 str has no decode() — confirm the target runtime.
        return self.log_execute(command.decode('utf8'), *a, **b)
4299
class NoSQLAdapter(BaseAdapter):
    """Common base for the non-relational adapters (GAE datastore, CouchDB,
    MongoDB, IMAP, ...): provides Python-side value coercion and stubs out
    every SQL-only operation with SyntaxError."""

    can_select_for_update = False

    @staticmethod
    def to_unicode(obj):
        # Python 2 coercion: byte strings are decoded as utf8, anything
        # else goes through unicode()
        if isinstance(obj, str):
            return obj.decode('utf8')
        elif not isinstance(obj, unicode):
            return unicode(obj)
        return obj

    def id_query(self, table):
        # a query that matches every record of the table
        return table._id > 0

    def represent(self, obj, fieldtype):
        """Coerce *obj* into the Python value the backend stores for
        *fieldtype* (unlike the SQL adapters, no quoting happens here)."""
        field_is_type = fieldtype.startswith
        if isinstance(obj, CALLABLETYPES):
            obj = obj()
        if isinstance(fieldtype, SQLCustomType):
            return fieldtype.encoder(obj)
        if isinstance(obj, (Expression, Field)):
            raise SyntaxError("non supported on GAE")
        if self.dbengine == 'google:datastore':
            # ready-made GAE property values pass through untouched
            if isinstance(fieldtype, gae.Property):
                return obj
        is_string = isinstance(fieldtype,str)
        is_list = is_string and field_is_type('list:')
        if is_list:
            if not obj:
                obj = []
            if not isinstance(obj, (list, tuple)):
                obj = [obj]
        # empty string maps to None except for string-like types
        # (string/text/password/upload)
        if obj == '' and not \
                (is_string and fieldtype[:2] in ['st','te', 'pa','up']):
            return None
        if not obj is None:
            if isinstance(obj, list) and not is_list:
                obj = [self.represent(o, fieldtype) for o in obj]
            elif fieldtype in ('integer','bigint','id'):
                obj = long(obj)
            elif fieldtype == 'double':
                obj = float(obj)
            elif is_string and field_is_type('reference'):
                if isinstance(obj, (Row, Reference)):
                    obj = obj['id']
                obj = long(obj)
            elif fieldtype == 'boolean':
                # anything whose str() does not start with '0' or 'F'/'f'
                # counts as true
                if obj and not str(obj)[0].upper() in '0F':
                    obj = True
                else:
                    obj = False
            elif fieldtype == 'date':
                if not isinstance(obj, datetime.date):
                    (y, m, d) = map(int,str(obj).strip().split('-'))
                    obj = datetime.date(y, m, d)
                elif isinstance(obj,datetime.datetime):
                    (y, m, d) = (obj.year, obj.month, obj.day)
                    obj = datetime.date(y, m, d)
            elif fieldtype == 'time':
                if not isinstance(obj, datetime.time):
                    time_items = map(int,str(obj).strip().split(':')[:3])
                    if len(time_items) == 3:
                        (h, mi, s) = time_items
                    else:
                        (h, mi, s) = time_items + [0]
                    obj = datetime.time(h, mi, s)
            elif fieldtype == 'datetime':
                if not isinstance(obj, datetime.datetime):
                    (y, m, d) = map(int,str(obj)[:10].strip().split('-'))
                    time_items = map(int,str(obj)[11:].strip().split(':')[:3])
                    while len(time_items)<3:
                        time_items.append(0)
                    (h, mi, s) = time_items
                    obj = datetime.datetime(y, m, d, h, mi, s)
            elif fieldtype == 'blob':
                pass
            elif fieldtype == 'json':
                if isinstance(obj, basestring):
                    obj = self.to_unicode(obj)
                if have_serializers:
                    obj = serializers.loads_json(obj)
                elif simplejson:
                    obj = simplejson.loads(obj)
                else:
                    raise RuntimeError("missing simplejson")
            elif is_string and field_is_type('list:string'):
                return map(self.to_unicode,obj)
            elif is_list:
                return map(int,obj)
            else:
                obj = self.to_unicode(obj)
        return obj

    # the _insert/_count/_select/_delete/_update strings below are only
    # human-readable debugging representations: NoSQL backends build no SQL
    def _insert(self,table,fields):
        return 'insert %s in %s' % (fields, table)

    def _count(self,query,distinct=None):
        return 'count %s' % repr(query)

    def _select(self,query,fields,attributes):
        return 'select %s where %s' % (repr(fields), repr(query))

    def _delete(self,tablename, query):
        return 'delete %s where %s' % (repr(tablename),repr(query))

    def _update(self,tablename,query,fields):
        return 'update %s (%s) where %s' % (repr(tablename),
                                            repr(fields),repr(query))

    def commit(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def rollback(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    def close_connection(self):
        """
        remember: no transactions on many NoSQL
        """
        pass

    # these functions should never be called!
    def OR(self,first,second): raise SyntaxError("Not supported")
    def AND(self,first,second): raise SyntaxError("Not supported")
    def AS(self,first,second): raise SyntaxError("Not supported")
    def ON(self,first,second): raise SyntaxError("Not supported")
    def STARTSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ENDSWITH(self,first,second=None): raise SyntaxError("Not supported")
    def ADD(self,first,second): raise SyntaxError("Not supported")
    def SUB(self,first,second): raise SyntaxError("Not supported")
    def MUL(self,first,second): raise SyntaxError("Not supported")
    def DIV(self,first,second): raise SyntaxError("Not supported")
    def LOWER(self,first): raise SyntaxError("Not supported")
    def UPPER(self,first): raise SyntaxError("Not supported")
    def EXTRACT(self,first,what): raise SyntaxError("Not supported")
    def LENGTH(self, first): raise SyntaxError("Not supported")
    def AGGREGATE(self,first,what): raise SyntaxError("Not supported")
    def LEFT_JOIN(self): raise SyntaxError("Not supported")
    def RANDOM(self): raise SyntaxError("Not supported")
    def SUBSTRING(self,field,parameters): raise SyntaxError("Not supported")
    def PRIMARY_KEY(self,key): raise SyntaxError("Not supported")
    def ILIKE(self,first,second): raise SyntaxError("Not supported")
    def drop(self,table,mode): raise SyntaxError("Not supported")
    def alias(self,table,alias): raise SyntaxError("Not supported")
    def migrate_table(self,*a,**b): raise SyntaxError("Not supported")
    def distributed_transaction_begin(self,key): raise SyntaxError("Not supported")
    def prepare(self,key): raise SyntaxError("Not supported")
    def commit_prepared(self,key): raise SyntaxError("Not supported")
    def rollback_prepared(self,key): raise SyntaxError("Not supported")
    def concat_add(self,table): raise SyntaxError("Not supported")
    def constraint_name(self, table, fieldname): raise SyntaxError("Not supported")
    def create_sequence_and_triggers(self, query, table, **args): pass
    def log_execute(self,*a,**b): raise SyntaxError("Not supported")
    def execute(self,*a,**b): raise SyntaxError("Not supported")
    def represent_exceptions(self, obj, fieldtype): raise SyntaxError("Not supported")
    def lastrowid(self,table): raise SyntaxError("Not supported")
    def integrity_error_class(self): raise SyntaxError("Not supported")
    def rowslice(self,rows,minimum=0,maximum=None): raise SyntaxError("Not supported")
4465
class GAEF(object):
    """A single datastore filter: (field name, operator, value) plus a
    Python callable mirroring the comparison for in-memory evaluation."""

    def __init__(self, name, op, value, apply):
        # The datastore addresses the primary key as '__key__', not 'id'.
        self.name = '__key__' if name == 'id' else name
        self.op = op
        self.value = value
        self.apply = apply

    def __repr__(self):
        return '(%s %s %s:%s)' % (self.name, self.op,
                                  repr(self.value), type(self.value))
4475
4476 -class GoogleDatastoreAdapter(NoSQLAdapter):
4477 uploads_in_blob = True 4478 types = {} 4479
    # Deliberate no-ops (each returns None): the datastore adapter keeps no
    # .table metadata files, presumably because GAE offers no writable
    # filesystem — so the BaseAdapter file hooks are neutralized here.
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass
    # an optional trailing '://namespace' in the URI selects a datastore
    # namespace
    REGEX_NAMESPACE = re.compile('.*://(?P<namespace>.+)')

    def __init__(self,db,uri,pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        # NOTE: self.types is a class-level dict, so update() mutates it
        # for the whole class, not just this instance.
        self.types.update({
                'boolean': gae.BooleanProperty,
                'string': (lambda **kwargs: gae.StringProperty(multiline=True, **kwargs)),
                'text': gae.TextProperty,
                'json': gae.TextProperty,
                'password': gae.StringProperty,
                'blob': gae.BlobProperty,
                'upload': gae.StringProperty,
                'integer': gae.IntegerProperty,
                'bigint': gae.IntegerProperty,
                'float': gae.FloatProperty,
                'double': gae.FloatProperty,
                'decimal': GAEDecimalProperty,
                'date': gae.DateProperty,
                'time': gae.TimeProperty,
                'datetime': gae.DateTimeProperty,
                # 'id' is handled by the datastore key itself
                'id': None,
                'reference': gae.IntegerProperty,
                'list:string': (lambda **kwargs: gae.StringListProperty(default=None, **kwargs)),
                'list:integer': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                'list:reference': (lambda **kwargs: gae.ListProperty(int,default=None, **kwargs)),
                })
        self.db = db
        self.uri = uri
        self.dbengine = 'google:datastore'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        # no connection pooling on the datastore
        self.pool_size = 0
        match = self.REGEX_NAMESPACE.match(uri)
        if match:
            namespace_manager.set_namespace(match.group('namespace'))
4522
4523 - def parse_id(self, value, field_type):
4524 return value
4525
    def create_table(self,table,migrate=True,fake_migrate=False, polymodel=None):
        """Build a gae.Model (or PolyModel) subclass for *table* and attach
        it as table._tableobj. migrate/fake_migrate are accepted for
        interface compatibility but are not used by this backend."""
        myfields = {}
        for field in table:
            # fields inherited from a polymodel parent are declared there
            if isinstance(polymodel,Table) and field.name in polymodel.fields():
                continue
            attr = {}
            if isinstance(field.custom_qualifier, dict):
                #this is custom properties to add to the GAE field declartion
                attr = field.custom_qualifier
            field_type = field.type
            if isinstance(field_type, SQLCustomType):
                ftype = self.types[field_type.native or field_type.type](**attr)
            elif isinstance(field_type, gae.Property):
                # a ready-made GAE property was supplied directly
                ftype = field_type
            elif field_type.startswith('id'):
                # the datastore key itself plays the role of the id field
                continue
            elif field_type.startswith('decimal'):
                precision, scale = field_type[7:].strip('()').split(',')
                precision = int(precision)
                scale = int(scale)
                ftype = GAEDecimalProperty(precision, scale, **attr)
            elif field_type.startswith('reference'):
                if field.notnull:
                    attr = dict(required=True)
                referenced = field_type[10:].strip()
                ftype = self.types[field_type[:9]](referenced, **attr)
            elif field_type.startswith('list:reference'):
                if field.notnull:
                    attr['required'] = True
                referenced = field_type[15:].strip()
                ftype = self.types[field_type[:14]](**attr)
            elif field_type.startswith('list:'):
                ftype = self.types[field_type](**attr)
            elif not field_type in self.types\
                 or not self.types[field_type]:
                raise SyntaxError('Field: unknown field type: %s' % field_type)
            else:
                ftype = self.types[field_type](**attr)
            myfields[field.name] = ftype
        # choose the model base class according to polymodel
        if not polymodel:
            table._tableobj = classobj(table._tablename, (gae.Model, ), myfields)
        elif polymodel==True:
            table._tableobj = classobj(table._tablename, (PolyModel, ), myfields)
        elif isinstance(polymodel,Table):
            table._tableobj = classobj(table._tablename, (polymodel._tableobj, ), myfields)
        else:
            raise SyntaxError("polymodel must be None, True, a table or a tablename")
        return None
4574
    def expand(self,expression,field_type=None):
        """Recursively expand a DAL expression into its GAE form.

        Fields expand to their property name, Query/Expression nodes are
        delegated to their operator method (which returns a list of GAEF
        filters), literals are converted via represent(), and lists are
        joined into a comma-separated string.
        """
        if isinstance(expression,Field):
            # text/blob/json properties are unindexed on the datastore
            # and therefore cannot be filtered or ordered on
            if expression.type in ('text', 'blob', 'json'):
                raise SyntaxError('AppEngine does not index by: %s' % expression.type)
            return expression.name
        elif isinstance(expression, (Expression, Query)):
            # dispatch to the operator (AND/EQ/LT/...), passing whichever
            # operands are present
            if not expression.second is None:
                return expression.op(expression.first, expression.second)
            elif not expression.first is None:
                return expression.op(expression.first)
            else:
                return expression.op()
        elif field_type:
            return self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            return ','.join([self.represent(item,field_type) for item in expression])
        else:
            return str(expression)
4593 4594 ### TODO from gql.py Expression
4595 - def AND(self,first,second):
4596 a = self.expand(first) 4597 b = self.expand(second) 4598 if b[0].name=='__key__' and a[0].name!='__key__': 4599 return b+a 4600 return a+b
4601
4602 - def EQ(self,first,second=None):
4603 if isinstance(second, Key): 4604 return [GAEF(first.name,'=',second,lambda a,b:a==b)] 4605 return [GAEF(first.name,'=',self.represent(second,first.type),lambda a,b:a==b)]
4606
4607 - def NE(self,first,second=None):
4608 if first.type != 'id': 4609 return [GAEF(first.name,'!=',self.represent(second,first.type),lambda a,b:a!=b)] 4610 else: 4611 if not second is None: 4612 second = Key.from_path(first._tablename, long(second)) 4613 return [GAEF(first.name,'!=',second,lambda a,b:a!=b)]
4614
4615 - def LT(self,first,second=None):
4616 if first.type != 'id': 4617 return [GAEF(first.name,'<',self.represent(second,first.type),lambda a,b:a<b)] 4618 else: 4619 second = Key.from_path(first._tablename, long(second)) 4620 return [GAEF(first.name,'<',second,lambda a,b:a<b)]
4621
4622 - def LE(self,first,second=None):
4623 if first.type != 'id': 4624 return [GAEF(first.name,'<=',self.represent(second,first.type),lambda a,b:a<=b)] 4625 else: 4626 second = Key.from_path(first._tablename, long(second)) 4627 return [GAEF(first.name,'<=',second,lambda a,b:a<=b)]
4628
    def GT(self,first,second=None):
        """Greater-than filter; 'id' values are converted to datastore Keys.

        `id > 0` (or '0') is deliberately kept in the plain branch: it is
        the DAL idiom for "all records" and is skipped later in
        select_raw, and a Key cannot be built from id 0 anyway.
        """
        if first.type != 'id' or second==0 or second == '0':
            return [GAEF(first.name,'>',self.represent(second,first.type),lambda a,b:a>b)]
        else:
            second = Key.from_path(first._tablename, long(second))
            return [GAEF(first.name,'>',second,lambda a,b:a>b)]
4635
4636 - def GE(self,first,second=None):
4637 if first.type != 'id': 4638 return [GAEF(first.name,'>=',self.represent(second,first.type),lambda a,b:a>=b)] 4639 else: 4640 second = Key.from_path(first._tablename, long(second)) 4641 return [GAEF(first.name,'>=',second,lambda a,b:a>=b)]
4642
4643 - def INVERT(self,first):
4644 return '-%s' % first.name
4645
4646 - def COMMA(self,first,second):
4647 return '%s, %s' % (self.expand(first),self.expand(second))
4648
4649 - def BELONGS(self,first,second=None):
4650 if not isinstance(second,(list, tuple)): 4651 raise SyntaxError("Not supported") 4652 if first.type != 'id': 4653 return [GAEF(first.name,'in',self.represent(second,first.type),lambda a,b:a in b)] 4654 else: 4655 second = [Key.from_path(first._tablename, int(i)) for i in second] 4656 return [GAEF(first.name,'in',second,lambda a,b:a in b)]
4657
    def CONTAINS(self,first,second,case_sensitive=False):
        """Membership filter on a list: field.

        On the datastore, an '=' filter against a list property matches
        any element, hence the '=' op; the in-memory fallback lambda
        checks b in a.
        """
        # silently ignoring: GAE can only do case sensitive matches!
        if not first.type.startswith('list:'):
            raise SyntaxError("Not supported")
        return [GAEF(first.name,'=',self.expand(second,first.type[5:]),lambda a,b:b in a)]
4663
4664 - def NOT(self,first):
4665 nops = { self.EQ: self.NE, 4666 self.NE: self.EQ, 4667 self.LT: self.GE, 4668 self.GT: self.LE, 4669 self.LE: self.GT, 4670 self.GE: self.LT} 4671 if not isinstance(first,Query): 4672 raise SyntaxError("Not suported") 4673 nop = nops.get(first.op,None) 4674 if not nop: 4675 raise SyntaxError("Not suported %s" % first.op.__name__) 4676 first.op = nop 4677 return self.expand(first)
4678
4679 - def truncate(self,table,mode):
4680 self.db(self.db._adapter.id_query(table)).delete()
4681
4682 - def select_raw(self,query,fields=None,attributes=None):
4683 db = self.db 4684 fields = fields or [] 4685 attributes = attributes or {} 4686 args_get = attributes.get 4687 new_fields = [] 4688 for item in fields: 4689 if isinstance(item,SQLALL): 4690 new_fields += item._table 4691 else: 4692 new_fields.append(item) 4693 fields = new_fields 4694 if query: 4695 tablename = self.get_table(query) 4696 elif fields: 4697 tablename = fields[0].tablename 4698 query = db._adapter.id_query(fields[0].table) 4699 else: 4700 raise SyntaxError("Unable to determine a tablename") 4701 4702 if query: 4703 if use_common_filters(query): 4704 query = self.common_filter(query,[tablename]) 4705 4706 #tableobj is a GAE Model class (or subclass) 4707 tableobj = db[tablename]._tableobj 4708 filters = self.expand(query) 4709 4710 projection = None 4711 if len(db[tablename].fields) == len(fields): 4712 #getting all fields, not a projection query 4713 projection = None 4714 elif args_get('projection') == True: 4715 projection = [] 4716 for f in fields: 4717 if f.type in ['text', 'blob', 'json']: 4718 raise SyntaxError( 4719 "text and blob field types not allowed in projection queries") 4720 else: 4721 projection.append(f.name) 4722 4723 # projection's can't include 'id'. 
4724 # it will be added to the result later 4725 query_projection = [ 4726 p for p in projection if \ 4727 p != db[tablename]._id.name] if projection \ 4728 else None 4729 4730 cursor = None 4731 if isinstance(args_get('reusecursor'), str): 4732 cursor = args_get('reusecursor') 4733 items = gae.Query(tableobj, projection=query_projection, 4734 cursor=cursor) 4735 4736 for filter in filters: 4737 if args_get('projection') == True and \ 4738 filter.name in query_projection and \ 4739 filter.op in ['=', '<=', '>=']: 4740 raise SyntaxError( 4741 "projection fields cannot have equality filters") 4742 if filter.name=='__key__' and filter.op=='>' and filter.value==0: 4743 continue 4744 elif filter.name=='__key__' and filter.op=='=': 4745 if filter.value==0: 4746 items = [] 4747 elif isinstance(filter.value, Key): 4748 # key qeuries return a class instance, 4749 # can't use projection 4750 # extra values will be ignored in post-processing later 4751 item = tableobj.get(filter.value) 4752 items = (item and [item]) or [] 4753 else: 4754 # key qeuries return a class instance, 4755 # can't use projection 4756 # extra values will be ignored in post-processing later 4757 item = tableobj.get_by_id(filter.value) 4758 items = (item and [item]) or [] 4759 elif isinstance(items,list): # i.e. there is a single record! 4760 items = [i for i in items if filter.apply( 4761 getattr(item,filter.name),filter.value)] 4762 else: 4763 if filter.name=='__key__' and filter.op != 'in': 4764 items.order('__key__') 4765 items = items.filter('%s %s' % (filter.name,filter.op), 4766 filter.value) 4767 if not isinstance(items,list): 4768 if args_get('left', None): 4769 raise SyntaxError('Set: no left join in appengine') 4770 if args_get('groupby', None): 4771 raise SyntaxError('Set: no groupby in appengine') 4772 orderby = args_get('orderby', False) 4773 if orderby: 4774 ### THIS REALLY NEEDS IMPROVEMENT !!! 
4775 if isinstance(orderby, (list, tuple)): 4776 orderby = xorify(orderby) 4777 if isinstance(orderby,Expression): 4778 orderby = self.expand(orderby) 4779 orders = orderby.split(', ') 4780 for order in orders: 4781 order={'-id':'-__key__','id':'__key__'}.get(order,order) 4782 items = items.order(order) 4783 if args_get('limitby', None): 4784 (lmin, lmax) = attributes['limitby'] 4785 (limit, offset) = (lmax - lmin, lmin) 4786 rows = items.fetch(limit,offset=offset) 4787 #cursor is only useful if there was a limit and we didn't return 4788 # all results 4789 if args_get('reusecursor'): 4790 db['_lastcursor'] = items.cursor() 4791 items = rows 4792 return (items, tablename, projection or db[tablename].fields)
4793
    def select(self,query,fields,attributes):
        """
        This is the GAE version of select.  some notes to consider:
         - db['_lastsql'] is not set because there is not SQL statement string
           for a GAE query
         - 'nativeRef' is a magical fieldname used for self references on GAE
         - optional attribute 'projection' when set to True will trigger
           use of the GAE projection queries.  note that there are rules for
           what is accepted imposed by GAE: each field must be indexed,
           projection queries cannot contain blob or text fields, and you
           cannot use == and also select that same field.  see https://developers.google.com/appengine/docs/python/datastore/queries#Query_Projection
         - optional attribute 'reusecursor' allows use of cursor with queries
           that have the limitby attribute.  Set the attribute to True for the
           first query, set it to the value of db['_lastcursor'] to continue
           a previous query.  The user must save the cursor value between
           requests, and the filters must be identical.  It is up to the user
           to follow google's limitations: https://developers.google.com/appengine/docs/python/datastore/queries#Query_Cursors
        """

        (items, tablename, fields) = self.select_raw(query,fields,attributes)
        # self.db['_lastsql'] = self._select(query,fields,attributes)
        # for the id column and self-references the whole entity is kept
        # in the cell; other columns take the entity attribute value
        rows = [[(t==self.db[tablename]._id.name and item) or \
                 (t=='nativeRef' and item) or getattr(item, t) \
                     for t in fields] for item in items]
        colnames = ['%s.%s' % (tablename, t) for t in fields]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)
4821
4822 - def count(self,query,distinct=None,limit=None):
4823 if distinct: 4824 raise RuntimeError("COUNT DISTINCT not supported") 4825 (items, tablename, fields) = self.select_raw(query) 4826 # self.db['_lastsql'] = self._count(query) 4827 try: 4828 return len(items) 4829 except TypeError: 4830 return items.count(limit=limit)
4831
    def delete(self,tablename, query):
        """
        This function was changed on 2010-05-04 because according to
        http://code.google.com/p/googleappengine/issues/detail?id=3119
        GAE no longer supports deleting more than 1000 records.

        Returns the number of records deleted.
        """
        # self.db['_lastsql'] = self._delete(tablename,query)
        (items, tablename, fields) = self.select_raw(query)
        # items can be one item or a query
        if not isinstance(items,list):
            #use a keys_only query to ensure that this runs as a datastore
            # small operations
            # delete in batches of 1000 until the query returns nothing
            leftitems = items.fetch(1000, keys_only=True)
            counter = 0
            while len(leftitems):
                counter += len(leftitems)
                gae.delete(leftitems)
                leftitems = items.fetch(1000, keys_only=True)
        else:
            # already-materialized entities: delete them all at once
            counter = len(items)
            gae.delete(items)
        return counter
4854
4855 - def update(self,tablename,query,update_fields):
4856 # self.db['_lastsql'] = self._update(tablename,query,update_fields) 4857 (items, tablename, fields) = self.select_raw(query) 4858 counter = 0 4859 for item in items: 4860 for field, value in update_fields: 4861 setattr(item, field.name, self.represent(value,field.type)) 4862 item.put() 4863 counter += 1 4864 LOGGER.info(str(counter)) 4865 return counter
4866
4867 - def insert(self,table,fields):
4868 dfields=dict((f.name,self.represent(v,f.type)) for f,v in fields) 4869 # table._db['_lastsql'] = self._insert(table,fields) 4870 tmp = table._tableobj(**dfields) 4871 tmp.put() 4872 rid = Reference(tmp.key().id()) 4873 (rid._table, rid._record, rid._gaekey) = (table, None, tmp.key()) 4874 return rid
4875
4876 - def bulk_insert(self,table,items):
4877 parsed_items = [] 4878 for item in items: 4879 dfields=dict((f.name,self.represent(v,f.type)) for f,v in item) 4880 parsed_items.append(table._tableobj(**dfields)) 4881 gae.put(parsed_items) 4882 return True
4883
def uuid2int(uuidv):
    """Return the 128-bit integer encoded by the UUID string *uuidv*."""
    return uuid.UUID(hex=uuidv).int
4886
def int2uuid(n):
    """Return the canonical UUID string for the 128-bit integer *n*."""
    u = uuid.UUID(int=n)
    return str(u)
4889
class CouchDBAdapter(NoSQLAdapter):
    """DAL adapter for CouchDB (experimental).

    Queries are expanded into javascript boolean expressions and run as
    temporary map views (see _select); documents use the stringified
    record id as their CouchDB _id.
    """
    drivers = ('couchdb',)

    # uploads are stored inline in the document, not on the filesystem
    uploads_in_blob = True
    # DAL field type -> python type used by represent/parse
    # (long/str are the python 2 types this module targets)
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    # no migration files for CouchDB: file operations are no-ops
    def file_exists(self, filename): pass
    def file_open(self, filename, mode='rb', lock=True): pass
    def file_close(self, fileobj): pass

    def expand(self,expression,field_type=None):
        # the DAL 'id' column is stored as the CouchDB document _id
        if isinstance(expression,Field):
            if expression.type=='id':
                return "%s._id" % expression.tablename
        return BaseAdapter.expand(self,expression,field_type)

    def AND(self,first,second):
        # javascript conjunction, evaluated inside the map function
        return '(%s && %s)' % (self.expand(first),self.expand(second))

    def OR(self,first,second):
        # javascript disjunction
        return '(%s || %s)' % (self.expand(first),self.expand(second))

    def EQ(self,first,second):
        # None maps to javascript null
        if second is None:
            return '(%s == null)' % self.expand(first)
        return '(%s == %s)' % (self.expand(first),self.expand(second,first.type))

    def NE(self,first,second):
        if second is None:
            return '(%s != null)' % self.expand(first)
        return '(%s != %s)' % (self.expand(first),self.expand(second,first.type))

    def COMMA(self,first,second):
        return '%s + %s' % (self.expand(first),self.expand(second))

    def represent(self, obj, fieldtype):
        """Serialize *obj* to the javascript/json literal embedded in
        the generated map function."""
        value = NoSQLAdapter.represent(self, obj, fieldtype)
        if fieldtype=='id':
            return repr(str(int(value)))
        elif fieldtype in ('date','time','datetime','boolean'):
            return serializers.json(value)
        # strings: encode unicode as utf8, then quote via repr
        return repr(not isinstance(value,unicode) and value \
                        or value and value.encode('utf8'))

    def __init__(self,db,uri='couchdb://127.0.0.1:5984',
                 pool_size=0,folder=None,db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to the CouchDB server named by *uri*
        (couchdb://host:port).  Most keyword arguments exist for
        signature compatibility with the SQL adapters."""
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.dbengine = 'couchdb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size

        # strip the 'couchdb://' scheme and talk plain http
        url='http://'+uri[10:]
        def connector(url=url,driver_args=driver_args):
            return self.driver.Server(url,**driver_args)
        self.reconnect(connector,cursor=False)

    def create_table(self, table, migrate=True, fake_migrate=False, polymodel=None):
        # one CouchDB database per table; ignore failures (it may exist)
        if migrate:
            try:
                self.connection.create(table._tablename)
            except:
                pass

    def insert(self,table,fields):
        """Store a new document keyed by a random uuid-derived integer;
        returns that integer id."""
        id = uuid2int(web2py_uuid())
        ctable = self.connection[table._tablename]
        values = dict((k.name,self.represent(v,k.type)) for k,v in fields)
        values['_id'] = str(id)
        ctable.save(values)
        return id

    def _select(self,query,fields,attributes):
        """Build the javascript map function and column names for
        *query*; returns (function_source, colnames)."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        for key in set(attributes.keys())-SELECT_ARGS:
            raise SyntaxError('invalid select attribute: %s' % key)
        new_fields=[]
        for item in fields:
            if isinstance(item,SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        # map the DAL 'id' field onto the CouchDB '_id' key
        def uid(fd):
            return fd=='id' and '_id' or fd
        # NOTE(review): `get` is defined but not used below — confirm
        # it is dead code before removing
        def get(row,fd):
            return fd=='id' and int(row['_id']) or row.get(fd,None)
        fields = new_fields
        tablename = self.get_table(query)
        fieldnames = [f.name for f in (fields or self.db[tablename])]
        colnames = ['%s.%s' % (tablename,k) for k in fieldnames]
        fields = ','.join(['%s.%s' % (tablename,uid(f)) for f in fieldnames])
        # temporary view: emit the selected fields for matching docs
        fn="(function(%(t)s){if(%(query)s)emit(%(order)s,[%(fields)s]);})" %\
            dict(t=tablename,
                 query=self.expand(query),
                 order='%s._id' % tablename,
                 fields=fields)
        return fn, colnames

    def select(self,query,fields,attributes):
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        fn, colnames = self._select(query,fields,attributes)
        tablename = colnames[0].split('.')[0]
        ctable = self.connection[tablename]
        # run the temporary view and collect the emitted value lists
        rows = [cols['value'] for cols in ctable.query(fn)]
        processor = attributes.get('processor',self.parse)
        return processor(rows,fields,colnames,False)

    def delete(self,tablename,query):
        """Delete matching documents; returns how many were removed."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        # fast path: delete-by-id avoids running a view
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            assert(tablename == query.first.tablename)
            ctable = self.connection[tablename]
            try:
                del ctable[str(id)]
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            # general path: select the ids, then delete one by one
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            for row in rows:
                del ctable[str(row.id)]
            return len(rows)

    def update(self,tablename,query,fields):
        """Update matching documents in place; returns how many changed."""
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        # fast path: update-by-id avoids running a view
        if query.first.type=='id' and query.op==self.EQ:
            id = query.second
            tablename = query.first.tablename
            ctable = self.connection[tablename]
            try:
                doc = ctable[str(id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,self.db[tablename][key.name].type)
                ctable.save(doc)
                return 1
            except couchdb.http.ResourceNotFound:
                return 0
        else:
            tablename = self.get_table(query)
            rows = self.select(query,[self.db[tablename]._id],{})
            ctable = self.connection[tablename]
            table = self.db[tablename]
            for row in rows:
                doc = ctable[str(row.id)]
                for key,value in fields:
                    doc[key.name] = self.represent(value,table[key.name].type)
                ctable.save(doc)
            return len(rows)

    def count(self,query,distinct=None):
        # counts by materializing the matching ids; DISTINCT unsupported
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        rows = self.select(query,[self.db[tablename]._id],{})
        return len(rows)
5081
def cleanup(text):
    """
    Validate that *text* contains only [0-9a-zA-Z_] and return it
    unchanged; raise SyntaxError otherwise.
    """
    if REGEX_ALPHANUMERIC.match(text):
        return text
    raise SyntaxError('invalid table or field name: %s' % text)
5089
class MongoDBAdapter(NoSQLAdapter):
    """DAL adapter for MongoDB via pymongo (in progress).

    Record ids are mapped onto Mongo ObjectIds: the DAL integer id is
    the ObjectId's 24-hex-digit value (see object_id / parse_id).
    """
    # the driver serializes to/from json natively
    native_json = True
    drivers = ('pymongo',)

    # uploads are stored inline in the document
    uploads_in_blob = True

    # DAL field type -> python type used by represent/parse
    # (long/str are the python 2 types this module targets)
    types = {
                'boolean': bool,
                'string': str,
                'text': str,
                'json': str,
                'password': str,
                'blob': str,
                'upload': str,
                'integer': long,
                'bigint': long,
                'float': float,
                'double': float,
                'date': datetime.date,
                'time': datetime.time,
                'datetime': datetime.datetime,
                'id': long,
                'reference': long,
                'list:string': list,
                'list:integer': list,
                'list:reference': list,
        }

    error_messages = {"javascript_needed": "This must yet be replaced" +
                      " with javascript in order to work."}
    def __init__(self,db,uri='mongodb://127.0.0.1:5984/db',
                 pool_size=0, folder=None, db_codec ='UTF-8',
                 credential_decoder=IDENTITY, driver_args={},
                 adapter_args={}, do_connect=True, after_connection=None):
        """Connect to the Mongo database named in *uri*.

        Recognized adapter_args:
          minimumreplication -- write concern 'w' hint (default 0)
          safe               -- default acknowledged-write flag (default True)
        """

        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        import random
        from bson.objectid import ObjectId
        from bson.son import SON
        import pymongo.uri_parser

        m = pymongo.uri_parser.parse_uri(uri)

        # keep driver classes on the instance so other methods can use
        # them without re-importing
        self.SON = SON
        self.ObjectId = ObjectId
        self.random = random

        self.dbengine = 'mongodb'
        self.folder = folder
        db['_lastsql'] = ''
        self.db_codec = 'UTF-8'
        self._after_connection = after_connection
        self.pool_size = pool_size
        #this is the minimum amount of replicates that it should wait
        # for on insert/update
        self.minimumreplication = adapter_args.get('minimumreplication',0)
        # by default all inserts and selects are performand asynchronous,
        # but now the default is
        # synchronous, except when overruled by either this default or
        # function parameter
        self.safe = adapter_args.get('safe',True)

        # older pymongo returns a tuple; normalize to a dict with the
        # database name
        if isinstance(m,tuple):
            m = {"database" : m[1]}
        if m.get('database')==None:
            raise SyntaxError("Database is required!")
        def connector(uri=self.uri,m=m):
            try:
                # Connection() is deprecated
                if hasattr(self.driver, "MongoClient"):
                    Connection = self.driver.MongoClient
                else:
                    Connection = self.driver.Connection
                return Connection(uri)[m.get('database')]
            except self.driver.errors.ConnectionFailure:
                inst = sys.exc_info()[1]
                raise SyntaxError("The connection to " +
                                  uri + " could not be made")

        self.reconnect(connector,cursor=False)
5173
    def object_id(self, arg=None):
        """ Convert input to a valid Mongodb ObjectId instance

        self.object_id("<random>") -> ObjectId (not unique) instance

        Accepts an ObjectId (returned as-is), an integer, or a string:
        decimal digits, a 24-char raw hex value, "0x"-prefixed hex, or
        the literal "<random>" for a random 24-hex-digit id.  Raises
        ValueError/TypeError for anything else.
        """
        if not arg:
            arg = 0
        if isinstance(arg, basestring):
            # we assume an integer as default input
            rawhex = len(arg.replace("0x", "").replace("L", "")) == 24
            if arg.isdigit() and (not rawhex):
                arg = int(arg)
            elif arg == "<random>":
                arg = int("0x%sL" % \
                    "".join([self.random.choice("0123456789abcdef") \
                    for x in range(24)]), 0)
            elif arg.isalnum():
                if not arg.startswith("0x"):
                    arg = "0x%s" % arg
                try:
                    arg = int(arg, 0)
                except ValueError, e:
                    raise ValueError(
                            "invalid objectid argument string: %s" % e)
            else:
                raise ValueError("Invalid objectid argument string. " +
                                 "Requires an integer or base 16 value")
        elif isinstance(arg, self.ObjectId):
            return arg

        if not isinstance(arg, (int, long)):
            raise TypeError("object_id argument must be of type " +
                            "ObjectId or an objectid representable integer")
        if arg == 0:
            # zero-valued id: all-zeros 24-digit hex string
            hexvalue = "".zfill(24)
        else:
            hexvalue = hex(arg)[2:].replace("L", "")
        return self.ObjectId(hexvalue)
5211
5212 - def parse_reference(self, value, field_type):
5213 # here we have to check for ObjectID before base parse 5214 if isinstance(value, self.ObjectId): 5215 value = int(str(value), 16) 5216 return super(MongoDBAdapter, 5217 self).parse_reference(value, field_type)
5218
5219 - def parse_id(self, value, field_type):
5220 if isinstance(value, self.ObjectId): 5221 value = int(str(value), 16) 5222 return super(MongoDBAdapter, 5223 self).parse_id(value, field_type)
5224
    def represent(self, obj, fieldtype):
        """Convert *obj* to the value stored in Mongo for *fieldtype*.

        dates/times become datetimes (Mongo has no date/time types),
        list:reference members and reference/id values become ObjectIds.
        """
        # the base adapter does not support MongoDB ObjectId
        if isinstance(obj, self.ObjectId):
            value = obj
        else:
            value = NoSQLAdapter.represent(self, obj, fieldtype)
        # reference types must be convert to ObjectID
        if fieldtype =='date':
            if value == None:
                return value
            # this piece of data can be stripped off based on the fieldtype
            t = datetime.time(0, 0, 0)
            # mongodb doesn't has a date object and so it must datetime,
            # string or integer
            return datetime.datetime.combine(value, t)
        elif fieldtype == 'time':
            if value == None:
                return value
            # this piece of data can be stripped of based on the fieldtype
            d = datetime.date(2000, 1, 1)
            # mongodb doesn't has a time object and so it must datetime,
            # string or integer
            return datetime.datetime.combine(d, value)
        elif (isinstance(fieldtype, basestring) and
              fieldtype.startswith('list:')):
            if fieldtype.startswith('list:reference'):
                newval = []
                for v in value:
                    newval.append(self.object_id(v))
                return newval
            return value
        elif ((isinstance(fieldtype, basestring) and
               fieldtype.startswith("reference")) or
               (isinstance(fieldtype, Table))):
            value = self.object_id(value)

        return value
    # 'safe' determines whether a request is asynchronous (unacknowledged)
    # or synchronous (acknowledged).
    # For safety, we use synchronous requests by default.
    def insert(self, table, fields, safe=None):
        """Insert one document; returns the new integer id (decoded from
        the generated ObjectId).  *safe* overrides the adapter default
        acknowledged-write setting."""
        if safe==None:
            safe = self.safe
        ctable = self.connection[table._tablename]
        values = dict()
        for k, v in fields:
            # 'id' is generated by Mongo; 'safe' is not a real column
            if not k.name in ["id", "safe"]:
                fieldname = k.name
                fieldtype = table[k.name].type
                if ("reference" in fieldtype) or (fieldtype=="id"):
                    values[fieldname] = self.object_id(v)
                else:
                    values[fieldname] = self.represent(v, fieldtype)
        # pymongo sets values['_id'] as a side effect of insert()
        ctable.insert(values, safe=safe)
        return int(str(values['_id']), 16)
5281
5282 - def create_table(self, table, migrate=True, fake_migrate=False, 5283 polymodel=None, isCapped=False):
5284 if isCapped: 5285 raise RuntimeError("Not implemented")
5286
    def count(self, query, distinct=None, snapshot=True):
        """Count the documents matching *query* by running select() in
        count mode; DISTINCT is unsupported."""
        if distinct:
            raise RuntimeError("COUNT DISTINCT not supported")
        if not isinstance(query,Query):
            raise SyntaxError("Not Supported")
        tablename = self.get_table(query)
        return int(self.select(query,[self.db[tablename]._id], {},
                               count=True,snapshot=snapshot)['count'])
    # Maybe it would be faster if we just implemented the pymongo
    # .count() function, which is probably quicker?
    # Therefore call __select() connection[table].find(query).count(),
    # since this will probably reduce the return set?
    def expand(self, expression, field_type=None):
        """Recursively expand a DAL expression into a pymongo query dict
        (for queries), a field name string (for Fields), or a
        represented literal.

        NOTE(review): for an id/reference Query the first branch both
        mutates the expression (renames the field to '_id', converts the
        second operand to ObjectId) and already calls expression.op();
        the later elif then calls expression.op() a second time and its
        value wins.  Harmless for the pure operator methods below, but
        confirm before relying on single evaluation.
        """
        if isinstance(expression, Query):
            # any query using 'id':=
            #   set name as _id (as per pymongo/mongodb primary key)
            #   convert second arg to an objectid field
            #   (if its not already)
            #   if second arg is 0 convert to objectid
            if isinstance(expression.first,Field) and \
                    ((expression.first.type == 'id') or \
                    ("reference" in expression.first.type)):
                if expression.first.type == 'id':
                    expression.first.name = '_id'
                # cast to Mongo ObjectId
                if isinstance(expression.second, (tuple, list, set)):
                    expression.second = [self.object_id(item) for
                                         item in expression.second]
                else:
                    expression.second = self.object_id(expression.second)
                result = expression.op(expression.first, expression.second)

        if isinstance(expression, Field):
            if expression.type=='id':
                result = "_id"
            else:
                result = expression.name
        elif isinstance(expression, (Expression, Query)):
            if not expression.second is None:
                result = expression.op(expression.first, expression.second)
            elif not expression.first is None:
                result = expression.op(expression.first)
            elif not isinstance(expression.op, str):
                result = expression.op()
            else:
                result = expression.op
        elif field_type:
            result = self.represent(expression,field_type)
        elif isinstance(expression,(list,tuple)):
            result = ','.join(self.represent(item,field_type) for
                              item in expression)
        else:
            result = expression
        return result
5342
    def _select(self, query, fields, attributes):
        """Translate a DAL select into pymongo find() arguments.

        Returns (tablename, query_dict, fields_dict, sort_list,
        limit, skip).  Only limitby and orderby are implemented;
        other non-None attributes are logged and ignored.
        """
        if 'for_update' in attributes:
            logging.warn('mongodb does not support for_update')
        for key in set(attributes.keys())-set(('limitby',
                                               'orderby','for_update')):
            if attributes[key]!=None:
                logging.warn('select attribute not implemented: %s' % key)

        new_fields=[]
        mongosort_list = []

        # try an orderby attribute
        orderby = attributes.get('orderby', False)
        limitby = attributes.get('limitby', False)
        # distinct = attributes.get('distinct', False)
        if orderby:
            if isinstance(orderby, (list, tuple)):
                orderby = xorify(orderby)

            # !!!! need to add 'random'
            # '-field' means descending order in pymongo terms
            for f in self.expand(orderby).split(','):
                if f.startswith('-'):
                    mongosort_list.append((f[1:], -1))
                else:
                    mongosort_list.append((f, 1))

        if limitby:
            limitby_skip, limitby_limit = limitby
        else:
            limitby_skip = limitby_limit = 0

        mongofields_dict = self.SON()
        mongoqry_dict = {}
        # flatten SQLALL placeholders into the table's fields
        for item in fields:
            if isinstance(item, SQLALL):
                new_fields += item._table
            else:
                new_fields.append(item)
        fields = new_fields
        if isinstance(query,Query):
            tablename = self.get_table(query)
        elif len(fields) != 0:
            tablename = fields[0].tablename
        else:
            raise SyntaxError("The table name could not be found in " +
                              "the query nor from the select statement.")
        mongoqry_dict = self.expand(query)
        fields = fields or self.db[tablename]
        # projection: include each selected field
        for field in fields:
            mongofields_dict[field.name] = 1

        return tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
            limitby_limit, limitby_skip
5396 5397
    def select(self, query, fields, attributes, count=False,
               snapshot=False):
        """Run the query built by _select().

        With count=True returns {'count': n}; otherwise returns parsed
        Rows.  Mongo's '_id' column is renamed back to the DAL 'id'.
        """
        # TODO: support joins
        tablename, mongoqry_dict, mongofields_dict, mongosort_list, \
        limitby_limit, limitby_skip = self._select(query, fields, attributes)
        ctable = self.connection[tablename]

        if count:
            return {'count' : ctable.find(
                    mongoqry_dict, mongofields_dict,
                    skip=limitby_skip, limit=limitby_limit,
                    sort=mongosort_list, snapshot=snapshot).count()}
        else:
            # pymongo cursor object
            mongo_list_dicts = ctable.find(mongoqry_dict,
                                mongofields_dict, skip=limitby_skip,
                                limit=limitby_limit, sort=mongosort_list,
                                snapshot=snapshot)
            rows = []
            # populate row in proper order
            # Here we replace ._id with .id to follow the standard naming
            colnames = []
            newnames = []
            for field in fields:
                colname = str(field)
                colnames.append(colname)
                tablename, fieldname = colname.split(".")
                if fieldname == "_id":
                    # Mongodb reserved uuid key
                    field.name = "id"
                newnames.append(".".join((tablename, field.name)))

            for record in mongo_list_dicts:
                row=[]
                for colname in colnames:
                    tablename, fieldname = colname.split(".")
                    # switch to Mongo _id uuids for retrieving
                    # record id's
                    if fieldname == "id": fieldname = "_id"
                    # missing keys yield None rather than KeyError
                    if fieldname in record:
                        value = record[fieldname]
                    else:
                        value = None
                    row.append(value)
                rows.append(row)

            processor = attributes.get('processor', self.parse)
            result = processor(rows, fields, newnames, False)
            return result
5447 5448
5449 - def INVERT(self, first):
5450 #print "in invert first=%s" % first 5451 return '-%s' % self.expand(first)
5452
5453 - def drop(self, table, mode=''):
5454 ctable = self.connection[table._tablename] 5455 ctable.drop()
5456 5457
5458 - def truncate(self, table, mode, safe=None):
5459 if safe == None: 5460 safe=self.safe 5461 ctable = self.connection[table._tablename] 5462 ctable.remove(None, safe=True)
5463
5464 - def oupdate(self, tablename, query, fields):
5465 if not isinstance(query, Query): 5466 raise SyntaxError("Not Supported") 5467 filter = None 5468 if query: 5469 filter = self.expand(query) 5470 modify = {'$set': dict((k.name, self.represent(v, k.type)) for 5471 k, v in fields)} 5472 return modify, filter
5473
    def update(self, tablename, query, fields, safe=None):
        """Update all matching documents via a multi $set; returns the
        acknowledged modified count, falling back to a pre-computed
        match count when unavailable."""
        if safe == None:
            safe = self.safe
        # return amount of adjusted rows or zero, but no exceptions
        # @ related not finding the result
        if not isinstance(query, Query):
            raise RuntimeError("Not implemented")
        # count matches up front as the fallback return value
        amount = self.count(query, False)
        modify, filter = self.oupdate(tablename, query, fields)
        try:
            result = self.connection[tablename].update(filter,
                       modify, multi=True, safe=safe)
            if safe:
                try:
                    # if result count is available fetch it
                    return result["n"]
                except (KeyError, AttributeError, TypeError):
                    return amount
            else:
                return amount
        except Exception, e:
            # TODO Reverse update query to verifiy that the query succeded
            raise RuntimeError("uncaught exception when updating rows: %s" % e)
5497 5498 #this function returns a dict with the where clause and update fields
5499 - def _update(self,tablename,query,fields):
5500 return str(self.oupdate(tablename, query, fields))
5501
5502 - def delete(self, tablename, query, safe=None):
5503 if safe is None: 5504 safe = self.safe 5505 amount = 0 5506 amount = self.count(query, False) 5507 if not isinstance(query, Query): 5508 raise RuntimeError("query type %s is not supported" % \ 5509 type(query)) 5510 filter = self.expand(query) 5511 self._delete(tablename, filter, safe=safe) 5512 return amount
5513
5514 - def _delete(self, tablename, filter, safe=None):
5515 return self.connection[tablename].remove(filter, safe=safe)
5516
5517 - def bulk_insert(self, table, items):
5518 return [self.insert(table,item) for item in items]
5519 5520 # TODO This will probably not work:(
5521 - def NOT(self, first):
5522 result = {} 5523 result["$not"] = self.expand(first) 5524 return result
5525
5526 - def AND(self,first,second):
5527 f = self.expand(first) 5528 s = self.expand(second) 5529 f.update(s) 5530 return f
5531
5532 - def OR(self,first,second):
5533 # pymongo expects: .find({'$or': [{'name':'1'}, {'name':'2'}]}) 5534 result = {} 5535 f = self.expand(first) 5536 s = self.expand(second) 5537 result['$or'] = [f,s] 5538 return result
5539
5540 - def BELONGS(self, first, second):
5541 if isinstance(second, str): 5542 return {self.expand(first) : {"$in" : [ second[:-1]]} } 5543 elif second==[] or second==() or second==set(): 5544 return {1:0} 5545 items = [self.expand(item, first.type) for item in second] 5546 return {self.expand(first) : {"$in" : items} }
5547
5548 - def EQ(self,first,second):
5549 result = {} 5550 result[self.expand(first)] = self.expand(second) 5551 return result
5552
5553 - def NE(self, first, second=None):
5554 result = {} 5555 result[self.expand(first)] = {'$ne': self.expand(second)} 5556 return result
5557
5558 - def LT(self,first,second=None):
5559 if second is None: 5560 raise RuntimeError("Cannot compare %s < None" % first) 5561 result = {} 5562 result[self.expand(first)] = {'$lt': self.expand(second)} 5563 return result
5564
5565 - def LE(self,first,second=None):
5566 if second is None: 5567 raise RuntimeError("Cannot compare %s <= None" % first) 5568 result = {} 5569 result[self.expand(first)] = {'$lte': self.expand(second)} 5570 return result
5571
5572 - def GT(self,first,second):
5573 result = {} 5574 result[self.expand(first)] = {'$gt': self.expand(second)} 5575 return result
5576
5577 - def GE(self,first,second=None):
5578 if second is None: 5579 raise RuntimeError("Cannot compare %s >= None" % first) 5580 result = {} 5581 result[self.expand(first)] = {'$gte': self.expand(second)} 5582 return result
5583
5584 - def ADD(self, first, second):
5585 raise NotImplementedError(self.error_messages["javascript_needed"]) 5586 return '%s + %s' % (self.expand(first), 5587 self.expand(second, first.type))
5588
5589 - def SUB(self, first, second):
5590 raise NotImplementedError(self.error_messages["javascript_needed"]) 5591 return '(%s - %s)' % (self.expand(first), 5592 self.expand(second, first.type))
5593
5594 - def MUL(self, first, second):
5595 raise NotImplementedError(self.error_messages["javascript_needed"]) 5596 return '(%s * %s)' % (self.expand(first), 5597 self.expand(second, first.type))
5598
5599 - def DIV(self, first, second):
5600 raise NotImplementedError(self.error_messages["javascript_needed"]) 5601 return '(%s / %s)' % (self.expand(first), 5602 self.expand(second, first.type))
5603
5604 - def MOD(self, first, second):
5605 raise NotImplementedError(self.error_messages["javascript_needed"]) 5606 return '(%s %% %s)' % (self.expand(first), 5607 self.expand(second, first.type))
5608
5609 - def AS(self, first, second):
5610 raise NotImplementedError(self.error_messages["javascript_needed"]) 5611 return '%s AS %s' % (self.expand(first), second)
5612 5613 # We could implement an option that simulates a full featured SQL 5614 # database. But I think the option should be set explicit or 5615 # implemented as another library.
5616 - def ON(self, first, second):
5617 raise NotImplementedError("This is not possible in NoSQL" + 5618 " but can be simulated with a wrapper.") 5619 return '%s ON %s' % (self.expand(first), self.expand(second))
    # BELOW ARE TWO IMPLEMENTATIONS OF THE SAME FUNCTIONS
    # WHICH ONE IS BEST?
5624 - def COMMA(self, first, second):
5625 return '%s, %s' % (self.expand(first), self.expand(second))
5626
    def LIKE(self, first, second):
        # NOTE(review): dead code -- this definition is shadowed by the
        # later $regex-based LIKE defined further down in this class.
        #escaping regex operators?
        return {self.expand(first): ('%s' % \
            self.expand(second, 'string').replace('%','/'))}
5631
    def STARTSWITH(self, first, second):
        # NOTE(review): dead code -- shadowed by the later $regex-based
        # STARTSWITH defined further down in this class.
        #escaping regex operators?
        return {self.expand(first): ('/^%s/' % \
            self.expand(second, 'string'))}
5636
5637 - def ENDSWITH(self, first, second):
5638 #escaping regex operators? 5639 return {self.expand(first): ('/%s^/' % \ 5640 self.expand(second, 'string'))}
5641
    def CONTAINS(self, first, second, case_sensitive=False):
        # NOTE(review): dead code -- shadowed by the later $regex-based
        # CONTAINS defined further down in this class.
        # silently ignore, only case sensitive
        # There is a technical difference, but mongodb doesn't support
        # that, but the result will be the same
        return {self.expand(first) : ('/%s/' % \
            self.expand(second, 'string'))}
5648
5649 - def LIKE(self, first, second):
5650 import re 5651 return {self.expand(first): {'$regex': \ 5652 re.escape(self.expand(second, 5653 'string')).replace('%','.*')}}
    #TODO verify full compatibility with official SQL Like operator
5656 - def STARTSWITH(self, first, second):
5657 #TODO Solve almost the same problem as with endswith 5658 import re 5659 return {self.expand(first): {'$regex' : '^' + 5660 re.escape(self.expand(second, 5661 'string'))}}
    #TODO verify full compatibility with official SQL Like operator
5664 - def ENDSWITH(self, first, second):
5665 #escaping regex operators? 5666 #TODO if searched for a name like zsa_corbitt and the function 5667 # is endswith('a') then this is also returned. 5668 # Aldo it end with a t 5669 import re 5670 return {self.expand(first): {'$regex': \ 5671 re.escape(self.expand(second, 'string')) + '$'}}
    #TODO verify full compatibility with official oracle contains operator
5674 - def CONTAINS(self, first, second, case_sensitive=False):
5675 # silently ignore, only case sensitive 5676 #There is a technical difference, but mongodb doesn't support 5677 # that, but the result will be the same 5678 #TODO contains operators need to be transformed to Regex 5679 return {self.expand(first) : {' $regex': \ 5680 ".*" + re.escape(self.expand(second, 'string')) + ".*"}}
5681
5682 5683 -class IMAPAdapter(NoSQLAdapter):
5684 drivers = ('imaplib',) 5685 5686 """ IMAP server adapter 5687 5688 This class is intended as an interface with 5689 email IMAP servers to perform simple queries in the 5690 web2py DAL query syntax, so email read, search and 5691 other related IMAP mail services (as those implemented 5692 by brands like Google(r), and Yahoo!(r) 5693 can be managed from web2py applications. 5694 5695 The code uses examples by Yuji Tomita on this post: 5696 http://yuji.wordpress.com/2011/06/22/python-imaplib-imap-example-with-gmail/#comment-1137 5697 and is based in docs for Python imaplib, python email 5698 and email IETF's (i.e. RFC2060 and RFC3501) 5699 5700 This adapter was tested with a small set of operations with Gmail(r). Other 5701 services requests could raise command syntax and response data issues. 5702 5703 It creates its table and field names "statically", 5704 meaning that the developer should leave the table and field 5705 definitions to the DAL instance by calling the adapter's 5706 .define_tables() method. The tables are defined with the 5707 IMAP server mailbox list information. 
5708 5709 .define_tables() returns a dictionary mapping dal tablenames 5710 to the server mailbox names with the following structure: 5711 5712 {<tablename>: str <server mailbox name>} 5713 5714 Here is a list of supported fields: 5715 5716 Field Type Description 5717 ################################################################ 5718 uid string 5719 answered boolean Flag 5720 created date 5721 content list:string A list of text or html parts 5722 to string 5723 cc string 5724 bcc string 5725 size integer the amount of octets of the message* 5726 deleted boolean Flag 5727 draft boolean Flag 5728 flagged boolean Flag 5729 sender string 5730 recent boolean Flag 5731 seen boolean Flag 5732 subject string 5733 mime string The mime header declaration 5734 email string The complete RFC822 message** 5735 attachments <type list> Each non text part as dict 5736 encoding string The main detected encoding 5737 5738 *At the application side it is measured as the length of the RFC822 5739 message string 5740 5741 WARNING: As row id's are mapped to email sequence numbers, 5742 make sure your imap client web2py app does not delete messages 5743 during select or update actions, to prevent 5744 updating or deleting different messages. 5745 Sequence numbers change whenever the mailbox is updated. 5746 To avoid this sequence numbers issues, it is recommended the use 5747 of uid fields in query references (although the update and delete 5748 in separate actions rule still applies). 
5749 5750 # This is the code recommended to start imap support 5751 # at the app's model: 5752 5753 imapdb = DAL("imap://user:password@server:port", pool_size=1) # port 993 for ssl 5754 imapdb.define_tables() 5755 5756 Here is an (incomplete) list of possible imap commands: 5757 5758 # Count today's unseen messages 5759 # smaller than 6000 octets from the 5760 # inbox mailbox 5761 5762 q = imapdb.INBOX.seen == False 5763 q &= imapdb.INBOX.created == datetime.date.today() 5764 q &= imapdb.INBOX.size < 6000 5765 unread = imapdb(q).count() 5766 5767 # Fetch last query messages 5768 rows = imapdb(q).select() 5769 5770 # it is also possible to filter query select results with limitby and 5771 # sequences of mailbox fields 5772 5773 set.select(<fields sequence>, limitby=(<int>, <int>)) 5774 5775 # Mark last query messages as seen 5776 messages = [row.uid for row in rows] 5777 seen = imapdb(imapdb.INBOX.uid.belongs(messages)).update(seen=True) 5778 5779 # Delete messages in the imap database that have mails from mr. 
Gumby 5780 5781 deleted = 0 5782 for mailbox in imapdb.tables 5783 deleted += imapdb(imapdb[mailbox].sender.contains("gumby")).delete() 5784 5785 # It is possible also to mark messages for deletion instead of ereasing them 5786 # directly with set.update(deleted=True) 5787 5788 5789 # This object give access 5790 # to the adapter auto mailbox 5791 # mapped names (which native 5792 # mailbox has what table name) 5793 5794 imapdb.mailboxes <dict> # tablename, server native name pairs 5795 5796 # To retrieve a table native mailbox name use: 5797 imapdb.<table>.mailbox 5798 5799 ### New features v2.4.1: 5800 5801 # Declare mailboxes statically with tablename, name pairs 5802 # This avoids the extra server names retrieval 5803 5804 imapdb.define_tables({"inbox": "INBOX"}) 5805 5806 # Selects without content/attachments/email columns will only 5807 # fetch header and flags 5808 5809 imapdb(q).select(imapdb.INBOX.sender, imapdb.INBOX.subject) 5810 """ 5811 5812 types = { 5813 'string': str, 5814 'text': str, 5815 'date': datetime.date, 5816 'datetime': datetime.datetime, 5817 'id': long, 5818 'boolean': bool, 5819 'integer': int, 5820 'bigint': long, 5821 'blob': str, 5822 'list:string': str, 5823 } 5824 5825 dbengine = 'imap' 5826 5827 REGEX_URI = re.compile('^(?P<user>[^:]+)(\:(?P<password>[^@]*))?@(?P<host>[^\:@]+)(\:(?P<port>[0-9]+))?$') 5828
    def __init__(self,
                 db,
                 uri,
                 pool_size=0,
                 folder=None,
                 db_codec ='UTF-8',
                 credential_decoder=IDENTITY,
                 driver_args={},
                 adapter_args={},
                 do_connect=True,
                 after_connection=None):
        """Initialize the IMAP adapter and (optionally) connect.

        db uri: user@example.com:password@imap.server.com:123
        Port 993 switches the driver to IMAP4_SSL.

        TODO: max size adapter argument for preventing large mail transfers
        """
        self.db = db
        self.uri = uri
        if do_connect: self.find_driver(adapter_args)
        self.pool_size=pool_size
        self.folder = folder
        self.db_codec = db_codec
        self._after_connection = after_connection
        self.credential_decoder = credential_decoder
        self.driver_args = driver_args
        self.adapter_args = adapter_args
        self.mailbox_size = None
        self.static_names = None
        self.charset = sys.getfilesystemencoding()
        # imap class (IMAP4 or IMAP4_SSL), chosen at connect time
        self.imap4 = None
        uri = uri.split("://")[1]

        """ MESSAGE is an identifier for sequence number"""

        self.flags = ['\\Deleted', '\\Draft', '\\Flagged',
                      '\\Recent', '\\Seen', '\\Answered']
        # maps DAL field names to IMAP search keys / raw flags; None means
        # the field has no server-side search equivalent
        self.search_fields = {
            'id': 'MESSAGE', 'created': 'DATE',
            'uid': 'UID', 'sender': 'FROM',
            'to': 'TO', 'cc': 'CC',
            'bcc': 'BCC', 'content': 'TEXT',
            'size': 'SIZE', 'deleted': '\\Deleted',
            'draft': '\\Draft', 'flagged': '\\Flagged',
            'recent': '\\Recent', 'seen': '\\Seen',
            'subject': 'SUBJECT', 'answered': '\\Answered',
            'mime': None, 'email': None,
            'attachments': None
            }

        db['_lastsql'] = ''

        m = self.REGEX_URI.match(uri)
        user = m.group('user')
        password = m.group('password')
        host = m.group('host')
        port = int(m.group('port'))
        over_ssl = False
        if port==993:
            over_ssl = True

        driver_args.update(host=host,port=port, password=password, user=user)
        def connector(driver_args=driver_args):
            # it is assumed authentication is always successful
            # TODO: support direct connection and login tests
            if over_ssl:
                self.imap4 = self.driver.IMAP4_SSL
            else:
                self.imap4 = self.driver.IMAP4
            connection = self.imap4(driver_args["host"], driver_args["port"])
            data = connection.login(driver_args["user"], driver_args["password"])

            # static mailbox list
            connection.mailbox_names = None

            # dummy cursor function
            connection.cursor = lambda : True

            return connection

        self.db.define_tables = self.define_tables
        self.connector = connector
        if do_connect: self.reconnect()
5911
    def reconnect(self, f=None, cursor=True):
        """
        IMAP4 Pool connection method

        imap connection lacks of self cursor command.
        A custom command should be provided as a replacement
        for connection pooling to prevent uncaught remote session
        closing

        """
        # already connected: nothing to do
        if getattr(self,'connection',None) != None:
            return
        if f is None:
            f = self.connector

        if not self.pool_size:
            # pooling disabled: connect directly
            self.connection = f()
            self.cursor = cursor and self.connection.cursor()
        else:
            POOLS = ConnectionPool.POOLS
            uri = self.uri
            while True:
                GLOBAL_LOCKER.acquire()
                if not uri in POOLS:
                    POOLS[uri] = []
                if POOLS[uri]:
                    # reuse a pooled connection when available
                    self.connection = POOLS[uri].pop()
                    GLOBAL_LOCKER.release()
                    self.cursor = cursor and self.connection.cursor()
                    if self.cursor and self.check_active_connection:
                        try:
                            # check if connection is alive or close it
                            result, data = self.connection.list()
                        except:
                            # Possible connection reset error
                            # TODO: read exception class
                            self.connection = f()
                    break
                else:
                    GLOBAL_LOCKER.release()
                    self.connection = f()
                    self.cursor = cursor and self.connection.cursor()
                    break
        self.after_connection_hook()
5956
5957 - def get_last_message(self, tablename):
5958 last_message = None 5959 # request mailbox list to the server 5960 # if needed 5961 if not isinstance(self.connection.mailbox_names, dict): 5962 self.get_mailboxes() 5963 try: 5964 result = self.connection.select(self.connection.mailbox_names[tablename]) 5965 last_message = int(result[1][0]) 5966 except (IndexError, ValueError, TypeError, KeyError): 5967 e = sys.exc_info()[1] 5968 LOGGER.debug("Error retrieving the last mailbox sequence number. %s" % str(e)) 5969 return last_message
5970
5971 - def get_uid_bounds(self, tablename):
5972 if not isinstance(self.connection.mailbox_names, dict): 5973 self.get_mailboxes() 5974 # fetch first and last messages 5975 # return (first, last) messages uid's 5976 last_message = self.get_last_message(tablename) 5977 result, data = self.connection.uid("search", None, "(ALL)") 5978 uid_list = data[0].strip().split() 5979 if len(uid_list) <= 0: 5980 return None 5981 else: 5982 return (uid_list[0], uid_list[-1])
5983
5984 - def convert_date(self, date, add=None):
5985 if add is None: 5986 add = datetime.timedelta() 5987 """ Convert a date object to a string 5988 with d-Mon-Y style for IMAP or the inverse 5989 case 5990 5991 add <timedelta> adds to the date object 5992 """ 5993 months = [None, "Jan","Feb","Mar","Apr","May","Jun", 5994 "Jul", "Aug","Sep","Oct","Nov","Dec"] 5995 if isinstance(date, basestring): 5996 # Prevent unexpected date response format 5997 try: 5998 dayname, datestring = date.split(",") 5999 except (ValueError): 6000 LOGGER.debug("Could not parse date text: %s" % date) 6001 return None 6002 date_list = datestring.strip().split() 6003 year = int(date_list[2]) 6004 month = months.index(date_list[1]) 6005 day = int(date_list[0]) 6006 hms = map(int, date_list[3].split(":")) 6007 return datetime.datetime(year, month, day, 6008 hms[0], hms[1], hms[2]) + add 6009 elif isinstance(date, (datetime.datetime, datetime.date)): 6010 return (date + add).strftime("%d-%b-%Y") 6011 6012 else: 6013 return None
6014 6015 @staticmethod
6016 - def header_represent(f, r):
6017 from email.header import decode_header 6018 text, encoding = decode_header(f)[0] 6019 return text
6020
6021 - def encode_text(self, text, charset, errors="replace"):
6022 """ convert text for mail to unicode""" 6023 if text is None: 6024 text = "" 6025 else: 6026 if isinstance(text, str): 6027 if charset is None: 6028 text = unicode(text, "utf-8", errors) 6029 else: 6030 text = unicode(text, charset, errors) 6031 else: 6032 raise Exception("Unsupported mail text type %s" % type(text)) 6033 return text.encode("utf-8")
6034
6035 - def get_charset(self, message):
6036 charset = message.get_content_charset() 6037 return charset
6038
6039 - def get_mailboxes(self):
6040 """ Query the mail database for mailbox names """ 6041 if self.static_names: 6042 # statically defined mailbox names 6043 self.connection.mailbox_names = self.static_names 6044 return self.static_names.keys() 6045 6046 mailboxes_list = self.connection.list() 6047 self.connection.mailbox_names = dict() 6048 mailboxes = list() 6049 x = 0 6050 for item in mailboxes_list[1]: 6051 x = x + 1 6052 item = item.strip() 6053 if not "NOSELECT" in item.upper(): 6054 sub_items = item.split("\"") 6055 sub_items = [sub_item for sub_item in sub_items \ 6056 if len(sub_item.strip()) > 0] 6057 # mailbox = sub_items[len(sub_items) -1] 6058 mailbox = sub_items[-1] 6059 # remove unwanted characters and store original names 6060 # Don't allow leading non alphabetic characters 6061 mailbox_name = re.sub('^[_0-9]*', '', re.sub('[^_\w]','',re.sub('[/ ]','_',mailbox))) 6062 mailboxes.append(mailbox_name) 6063 self.connection.mailbox_names[mailbox_name] = mailbox 6064 6065 return mailboxes
6066
6067 - def get_query_mailbox(self, query):
6068 nofield = True 6069 tablename = None 6070 attr = query 6071 while nofield: 6072 if hasattr(attr, "first"): 6073 attr = attr.first 6074 if isinstance(attr, Field): 6075 return attr.tablename 6076 elif isinstance(attr, Query): 6077 pass 6078 else: 6079 return None 6080 else: 6081 return None 6082 return tablename
6083
6084 - def is_flag(self, flag):
6085 if self.search_fields.get(flag, None) in self.flags: 6086 return True 6087 else: 6088 return False
6089
    def define_tables(self, mailbox_names=None):
        """
        Auto create common IMAP fileds

        This function creates fields definitions "statically"
        meaning that custom fields as in other adapters should
        not be supported and definitions handled on a service/mode
        basis (local syntax for Gmail(r), Ymail(r)

        Returns a dictionary with tablename, server native mailbox name
        pairs.
        """
        if mailbox_names:
            # optional statically declared mailboxes
            self.static_names = mailbox_names
        else:
            self.static_names = None
        # fetch mailbox names from the server when not cached yet
        if not isinstance(self.connection.mailbox_names, dict):
            self.get_mailboxes()

        names = self.connection.mailbox_names.keys()

        for name in names:
            self.db.define_table("%s" % name,
                Field("uid", "string", writable=False),
                Field("answered", "boolean"),
                Field("created", "datetime", writable=False),
                Field("content", "list:string", writable=False),
                Field("to", "string", writable=False),
                Field("cc", "string", writable=False),
                Field("bcc", "string", writable=False),
                Field("size", "integer", writable=False),
                Field("deleted", "boolean"),
                Field("draft", "boolean"),
                Field("flagged", "boolean"),
                Field("sender", "string", writable=False),
                Field("recent", "boolean", writable=False),
                Field("seen", "boolean"),
                Field("subject", "string", writable=False),
                Field("mime", "string", writable=False),
                Field("email", "string", writable=False, readable=False),
                Field("attachments", list, writable=False, readable=False),
                Field("encoding")
                )

            # Set a special _mailbox attribute for storing
            # native mailbox names
            self.db[name].mailbox = \
                self.connection.mailbox_names[name]

            # decode quoted printable
            self.db[name].to.represent = self.db[name].cc.represent = \
                self.db[name].bcc.represent = self.db[name].sender.represent = \
                self.db[name].subject.represent = self.header_represent

        # Set the db instance mailbox collections
        self.db.mailboxes = self.connection.mailbox_names
        return self.db.mailboxes
6148
6149 - def create_table(self, *args, **kwargs):
6150 # not implemented 6151 # but required by DAL 6152 pass
6153
    def _select(self, query, fields, attributes):
        """Return the IMAP search string that select() would issue."""
        # apply DAL common filters scoped to this query's mailbox
        if use_common_filters(query):
            query = self.common_filter(query, [self.get_query_mailbox(query),])
        return str(query)
6158
    def select(self, query, fields, attributes):
        """ Search and Fetch records and return web2py rows
        """
        # move this statement elsewhere (upper-level)
        if use_common_filters(query):
            query = self.common_filter(query, [self.get_query_mailbox(query),])

        import email
        # get records from imap server with search + fetch
        # convert results to a dictionary
        tablename = None
        fetch_results = list()

        if isinstance(query, Query):
            tablename = self.get_table(query)
            mailbox = self.connection.mailbox_names.get(tablename, None)
            if mailbox is None:
                raise ValueError("Mailbox name not found: %s" % mailbox)
            else:
                # select with readonly
                result, selected = self.connection.select(mailbox, True)
                if result != "OK":
                    raise Exception("IMAP error: %s" % selected)
                self.mailbox_size = int(selected[0])
                search_query = "(%s)" % str(query).strip()
                search_result = self.connection.uid("search", None, search_query)
                # Normal IMAP response OK is assumed (change this)
                if search_result[0] == "OK":
                    # For "light" remote server responses just get the first
                    # ten records (change for non-experimental implementation)
                    # However, light responses are not guaranteed with this
                    # approach, just fewer messages.
                    limitby = attributes.get('limitby', None)
                    messages_set = search_result[1][0].split()
                    # descending order
                    messages_set.reverse()
                    if limitby is not None:
                        # TODO: orderby, asc/desc, limitby from complete message set
                        messages_set = messages_set[int(limitby[0]):int(limitby[1])]

                    # keep the requests small for header/flags: fetch the
                    # full RFC822 body only when a body-bearing field is asked
                    if any([(field.name in ["content", "size",
                            "attachments", "email"]) for
                           field in fields]):
                        imap_fields = "(RFC822 FLAGS)"
                    else:
                        imap_fields = "(RFC822.HEADER FLAGS)"

                    if len(messages_set) > 0:
                        # create fetch results object list
                        # fetch each remote message and store it in memory
                        # (change to multi-fetch command syntax for faster
                        # transactions)
                        for uid in messages_set:
                            # fetch the RFC822 message body
                            typ, data = self.connection.uid("fetch", uid, imap_fields)
                            if typ == "OK":
                                fr = {"message": int(data[0][0].split()[0]),
                                      "uid": int(uid),
                                      "email": email.message_from_string(data[0][1]),
                                      "raw_message": data[0][1]}
                                fr["multipart"] = fr["email"].is_multipart()
                                # fetch flags for the message
                                fr["flags"] = self.driver.ParseFlags(data[1])
                                fetch_results.append(fr)
                            else:
                                # error retrieving the message body
                                raise Exception("IMAP error retrieving the body: %s" % data)
                else:
                    raise Exception("IMAP search error: %s" % search_result[1])
        elif isinstance(query, (Expression, basestring)):
            raise NotImplementedError()
        else:
            raise TypeError("Unexpected query type")

        imapqry_dict = {}
        imapfields_dict = {}

        # SQLALL or an empty field list means "all searchable fields"
        if len(fields) == 1 and isinstance(fields[0], SQLALL):
            allfields = True
        elif len(fields) == 0:
            allfields = True
        else:
            allfields = False
        if allfields:
            colnames = ["%s.%s" % (tablename, field) for field in self.search_fields.keys()]
        else:
            colnames = ["%s.%s" % (tablename, field.name) for field in fields]

        for k in colnames:
            imapfields_dict[k] = k

        imapqry_list = list()
        imapqry_array = list()
        for fr in fetch_results:
            attachments = []
            content = []
            size = 0
            n = int(fr["message"])
            item_dict = dict()
            message = fr["email"]
            uid = fr["uid"]
            charset = self.get_charset(message)
            flags = fr["flags"]
            raw_message = fr["raw_message"]
            # Return messages data mapping static fields
            # and fetched results. Mapping should be made
            # outside the select function (with auxiliary
            # instance methods)

            # pending: search flags states trough the email message
            # instances for correct output

            # preserve subject encoding (ASCII/quoted printable)

            if "%s.id" % tablename in colnames:
                item_dict["%s.id" % tablename] = n
            if "%s.created" % tablename in colnames:
                item_dict["%s.created" % tablename] = self.convert_date(message["Date"])
            if "%s.uid" % tablename in colnames:
                item_dict["%s.uid" % tablename] = uid
            if "%s.sender" % tablename in colnames:
                # If there is no encoding found in the message header
                # force utf-8 replacing characters (change this to
                # module's defaults). Applies to .sender, .to, .cc and .bcc fields
                item_dict["%s.sender" % tablename] = message["From"]
            if "%s.to" % tablename in colnames:
                item_dict["%s.to" % tablename] = message["To"]
            if "%s.cc" % tablename in colnames:
                if "Cc" in message.keys():
                    item_dict["%s.cc" % tablename] = message["Cc"]
                else:
                    item_dict["%s.cc" % tablename] = ""
            if "%s.bcc" % tablename in colnames:
                if "Bcc" in message.keys():
                    item_dict["%s.bcc" % tablename] = message["Bcc"]
                else:
                    item_dict["%s.bcc" % tablename] = ""
            if "%s.deleted" % tablename in colnames:
                item_dict["%s.deleted" % tablename] = "\\Deleted" in flags
            if "%s.draft" % tablename in colnames:
                item_dict["%s.draft" % tablename] = "\\Draft" in flags
            if "%s.flagged" % tablename in colnames:
                item_dict["%s.flagged" % tablename] = "\\Flagged" in flags
            if "%s.recent" % tablename in colnames:
                item_dict["%s.recent" % tablename] = "\\Recent" in flags
            if "%s.seen" % tablename in colnames:
                item_dict["%s.seen" % tablename] = "\\Seen" in flags
            if "%s.subject" % tablename in colnames:
                item_dict["%s.subject" % tablename] = message["Subject"]
            if "%s.answered" % tablename in colnames:
                item_dict["%s.answered" % tablename] = "\\Answered" in flags
            if "%s.mime" % tablename in colnames:
                item_dict["%s.mime" % tablename] = message.get_content_type()
            if "%s.encoding" % tablename in colnames:
                item_dict["%s.encoding" % tablename] = charset

            # Here goes the whole RFC822 body as an email instance
            # for controller side custom processing
            # The message is stored as a raw string
            # >> email.message_from_string(raw string)
            # returns a Message object for enhanced object processing
            if "%s.email" % tablename in colnames:
                # WARNING: no encoding performed (raw message)
                item_dict["%s.email" % tablename] = raw_message

            # Size measure as suggested in a Velocity Reviews post
            # by Tim Williams: "how to get size of email attachment"
            # Note: len() and server RFC822.SIZE reports doesn't match
            # To retrieve the server size for representation would add a new
            # fetch transaction to the process
            for part in message.walk():
                maintype = part.get_content_maintype()
                if ("%s.attachments" % tablename in colnames) or \
                   ("%s.content" % tablename in colnames):
                    if "%s.attachments" % tablename in colnames:
                        if not ("text" in maintype):
                            payload = part.get_payload(decode=True)
                            if payload:
                                attachment = {
                                    "payload": payload,
                                    "filename": part.get_filename(),
                                    "encoding": part.get_content_charset(),
                                    "mime": part.get_content_type(),
                                    "disposition": part["Content-Disposition"]}
                                attachments.append(attachment)
                    if "%s.content" % tablename in colnames:
                        payload = part.get_payload(decode=True)
                        part_charset = self.get_charset(part)
                        if "text" in maintype:
                            if payload:
                                content.append(self.encode_text(payload, part_charset))
                if "%s.size" % tablename in colnames:
                    if part is not None:
                        size += len(str(part))
            item_dict["%s.content" % tablename] = bar_encode(content)
            item_dict["%s.attachments" % tablename] = attachments
            item_dict["%s.size" % tablename] = size
            imapqry_list.append(item_dict)

        # extra object mapping for the sake of rows object
        # creation (sends an array or lists)
        for item_dict in imapqry_list:
            imapqry_array_item = list()
            for fieldname in colnames:
                imapqry_array_item.append(item_dict[fieldname])
            imapqry_array.append(imapqry_array_item)

        # parse result and return a rows object
        colnames = colnames
        processor = attributes.get('processor',self.parse)
        return processor(imapqry_array, fields, colnames)
6371
    def _update(self, tablename, query, fields, commit=False):
        """Build the list of IMAP STORE commands implied by an update.

        Returns (message_number, '+FLAGS'|'-FLAGS', '(<flags>)') tuples;
        only flag fields pass the is_flag() check and are translated,
        every other field is ignored.  '\\Recent' is read-only and never
        stored.
        """
        # TODO: the adapter should implement an .expand method
        commands = list()
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        mark = []
        unmark = []
        if query:
            for item in fields:
                field = item[0]
                name = field.name
                value = item[1]
                if self.is_flag(name):
                    flag = self.search_fields[name]
                    if (value is not None) and (flag != "\\Recent"):
                        if value:
                            mark.append(flag)
                        else:
                            unmark.append(flag)
            result, data = self.connection.select(
                self.connection.mailbox_names[tablename])
            string_query = "(%s)" % query
            result, data = self.connection.search(None, string_query)
            store_list = [item.strip() for item in data[0].split()
                          if item.strip().isdigit()]
            # build commands for marked flags
            for number in store_list:
                result = None
                if len(mark) > 0:
                    commands.append((number, "+FLAGS", "(%s)" % " ".join(mark)))
                if len(unmark) > 0:
                    commands.append((number, "-FLAGS", "(%s)" % " ".join(unmark)))
        return commands
6405
6406 - def update(self, tablename, query, fields):
6407 rowcount = 0 6408 commands = self._update(tablename, query, fields) 6409 for command in commands: 6410 result, data = self.connection.store(*command) 6411 if result == "OK": 6412 rowcount += 1 6413 else: 6414 raise Exception("IMAP storing error: %s" % data) 6415 return rowcount
6416
def _count(self, query, distinct=None):
    # Intentionally unimplemented: the IMAP adapter has no SQL to build
    # for COUNT; use .count() which queries the server directly.
    raise NotImplementedError()
6419
def count(self,query,distinct=None):
    """Count messages matching `query` by issuing an IMAP SEARCH.

    Returns 0 when the query is empty or no mailbox can be derived
    from it.  `distinct` is accepted for API compatibility but unused.
    """
    counter = 0
    tablename = self.get_query_mailbox(query)
    if query and tablename is not None:
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        result, data = self.connection.select(self.connection.mailbox_names[tablename])
        string_query = "(%s)" % query
        result, data = self.connection.search(None, string_query)
        # SEARCH returns space-separated message sequence numbers.
        store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
        counter = len(store_list)
    return counter
6432
def delete(self, tablename, query):
    """Flag matching messages as \\Deleted and EXPUNGE the mailbox.

    Returns the number of messages flagged.  Raises Exception on a
    non-OK STORE response.  EXPUNGE is only issued if at least one
    message was flagged.
    """
    counter = 0
    if query:
        if use_common_filters(query):
            query = self.common_filter(query, [tablename,])
        result, data = self.connection.select(self.connection.mailbox_names[tablename])
        string_query = "(%s)" % query
        result, data = self.connection.search(None, string_query)
        store_list = [item.strip() for item in data[0].split() if item.strip().isdigit()]
        for number in store_list:
            result, data = self.connection.store(number, "+FLAGS", "(\\Deleted)")
            if result == "OK":
                counter += 1
            else:
                raise Exception("IMAP store error: %s" % data)
        if counter > 0:
            # Permanently remove the flagged messages.
            result, data = self.connection.expunge()
    return counter
6451
def BELONGS(self, first, second):
    """Build an IMAP search term for `field in (values)`.

    Supported only for message sequence numbers and UIDs; non-digit
    values are silently dropped.
    """
    result = None
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        values = [str(val) for val in second if str(val).isdigit()]
        result = "%s" % ",".join(values).strip()

    elif name == "UID":
        values = [str(val) for val in second if str(val).isdigit()]
        result = "UID %s" % ",".join(values).strip()

    else:
        raise Exception("Operation not supported")
    # result = "(%s %s)" % (self.expand(first), self.expand(second))
    return result
6467
def CONTAINS(self, first, second, case_sensitive=False):
    """Build an IMAP substring search term (FROM/TO/SUBJECT/TEXT,
    CC/BCC, or a Content-Type HEADER match for the 'mime' field).

    `case_sensitive` is accepted but ignored: IMAP SEARCH matching
    behavior is server-defined.
    """
    # silently ignore, only case sensitive
    result = None
    name = self.search_fields[first.name]

    if name in ("FROM", "TO", "SUBJECT", "TEXT"):
        result = "%s \"%s\"" % (name, self.expand(second))
    else:
        if first.name in ("cc", "bcc"):
            result = "%s \"%s\"" % (first.name.upper(), self.expand(second))
        elif first.name == "mime":
            result = "HEADER Content-Type \"%s\"" % self.expand(second)
        else:
            raise Exception("Operation not supported")
    return result
6483
def GT(self, first, second):
    """Build an IMAP term for `field > value` (sequence number, UID,
    DATE via SINCE, or SIZE via LARGER).

    Returns "" (match-nothing placeholder) if UID bounds cannot be
    fetched from the server.
    """
    result = None
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        last_message = self.get_last_message(first.tablename)
        result = "%d:%d" % (int(self.expand(second)) + 1, last_message)
    elif name == "UID":
        # GT and LT may not return
        # expected sets depending on
        # the uid format implemented
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        try:
            lower_limit = int(self.expand(second)) + 1
        except (ValueError, TypeError):
            e = sys.exc_info()[1]
            raise Exception("Operation not supported (non integer UID)")
        result = "UID %s:%s" % (lower_limit, threshold)
    elif name == "DATE":
        # Strict '>' on dates: shift by one day and use SINCE (inclusive).
        result = "SINCE %s" % self.convert_date(second, add=datetime.timedelta(1))
    elif name == "SIZE":
        result = "LARGER %s" % self.expand(second)
    else:
        raise Exception("Operation not supported")
    return result
6513
def GE(self, first, second):
    """Build an IMAP term for `field >= value` (sequence number, UID,
    or DATE via SINCE).  Note: unlike GT, SIZE is not supported here.
    """
    result = None
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        last_message = self.get_last_message(first.tablename)
        result = "%s:%s" % (self.expand(second), last_message)
    elif name == "UID":
        # GT and LT may not return
        # expected sets depending on
        # the uid format implemented
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        lower_limit = self.expand(second)
        result = "UID %s:%s" % (lower_limit, threshold)
    elif name == "DATE":
        result = "SINCE %s" % self.convert_date(second)
    else:
        raise Exception("Operation not supported")
    return result
6537
def LT(self, first, second):
    """Build an IMAP term for `field < value` (sequence number, UID,
    DATE via BEFORE, or SIZE via SMALLER).
    """
    result = None
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        result = "%s:%s" % (1, int(self.expand(second)) - 1)
    elif name == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        try:
            upper_limit = int(self.expand(second)) - 1
        except (ValueError, TypeError):
            e = sys.exc_info()[1]
            raise Exception("Operation not supported (non integer UID)")
        result = "UID %s:%s" % (pedestal, upper_limit)
    elif name == "DATE":
        result = "BEFORE %s" % self.convert_date(second)
    elif name == "SIZE":
        result = "SMALLER %s" % self.expand(second)
    else:
        raise Exception("Operation not supported")
    return result
6563
def LE(self, first, second):
    """Build an IMAP term for `field <= value` (sequence number, UID,
    or DATE: BEFORE shifted by one day to make the bound inclusive).
    """
    result = None
    name = self.search_fields[first.name]
    if name == "MESSAGE":
        result = "%s:%s" % (1, self.expand(second))
    elif name == "UID":
        try:
            pedestal, threshold = self.get_uid_bounds(first.tablename)
        except TypeError:
            e = sys.exc_info()[1]
            LOGGER.debug("Error requesting uid bounds: %s", str(e))
            return ""
        upper_limit = int(self.expand(second))
        result = "UID %s:%s" % (pedestal, upper_limit)
    elif name == "DATE":
        result = "BEFORE %s" % self.convert_date(second, add=datetime.timedelta(1))
    else:
        raise Exception("Operation not supported")
    return result
6583
def NE(self, first, second=None):
    """Negated equality.  NE(field) with no value and an 'id' field is
    the special "all records" query (id >= 1); otherwise NOT(EQ(...)),
    collapsing any double negation produced by EQ on boolean flags.
    """
    if (second is None) and isinstance(first, Field):
        # All records special table query
        if first.type == "id":
            return self.GE(first, 1)
    result = self.NOT(self.EQ(first, second))
    result = result.replace("NOT NOT", "").strip()
    return result
6592
def EQ(self,first,second):
    """Build an IMAP equality term: message sequence number, UID,
    DATE via ON, or flag fields (truthy -> FLAG, falsy -> NOT FLAG;
    name.upper()[1:] strips the leading backslash of e.g. '\\Seen').
    """
    name = self.search_fields[first.name]
    result = None
    if name is not None:
        if name == "MESSAGE":
            # query by message sequence number
            result = "%s" % self.expand(second)
        elif name == "UID":
            result = "UID %s" % self.expand(second)
        elif name == "DATE":
            result = "ON %s" % self.convert_date(second)

        elif name in self.flags:
            if second:
                result = "%s" % (name.upper()[1:])
            else:
                result = "NOT %s" % (name.upper()[1:])
        else:
            raise Exception("Operation not supported")
    else:
        raise Exception("Operation not supported")
    return result
6615
def AND(self, first, second):
    """IMAP conjunction: juxtaposed search terms are implicitly ANDed,
    so the two expanded operands are simply joined with a space.
    """
    return "%s %s" % (self.expand(first), self.expand(second))
6619
def OR(self, first, second):
    """IMAP disjunction using the prefix OR operator; a leading
    "OR OR" produced by nested ORs is collapsed to a single "OR".
    """
    joined = "OR %s %s" % (self.expand(first), self.expand(second))
    return "%s" % joined.replace("OR OR", "OR")
6623
def NOT(self, first):
    """IMAP negation: prefix the expanded operand with NOT."""
    return "NOT %s" % self.expand(first)
########################################################################
# end of adapters
########################################################################

# Registry mapping the URI scheme (the part before '://', optionally
# with a ':driver' suffix) to the adapter class DAL.__init__ instantiates.
ADAPTERS = {
    'sqlite': SQLiteAdapter,
    'spatialite': SpatiaLiteAdapter,
    'sqlite:memory': SQLiteAdapter,
    'spatialite:memory': SpatiaLiteAdapter,
    'mysql': MySQLAdapter,
    'postgres': PostgreSQLAdapter,
    'postgres:psycopg2': PostgreSQLAdapter,
    'postgres:pg8000': PostgreSQLAdapter,
    'postgres2:psycopg2': NewPostgreSQLAdapter,
    'postgres2:pg8000': NewPostgreSQLAdapter,
    'oracle': OracleAdapter,
    'mssql': MSSQLAdapter,
    'mssql2': MSSQL2Adapter,
    'mssql3': MSSQL3Adapter,
    'sybase': SybaseAdapter,
    'db2': DB2Adapter,
    'teradata': TeradataAdapter,
    'informix': InformixAdapter,
    'informix-se': InformixSEAdapter,
    'firebird': FireBirdAdapter,
    'firebird_embedded': FireBirdAdapter,
    'ingres': IngresAdapter,
    'ingresu': IngresUnicodeAdapter,
    'sapdb': SAPDBAdapter,
    'cubrid': CubridAdapter,
    'jdbc:sqlite': JDBCSQLiteAdapter,
    'jdbc:sqlite:memory': JDBCSQLiteAdapter,
    'jdbc:postgres': JDBCPostgreSQLAdapter,
    'gae': GoogleDatastoreAdapter, # discouraged, for backward compatibility
    'google:datastore': GoogleDatastoreAdapter,
    'google:sql': GoogleSQLAdapter,
    'couchdb': CouchDBAdapter,
    'mongodb': MongoDBAdapter,
    'imap': IMAPAdapter
}
def sqlhtml_validators(field):
    """
    Field type validation, using web2py's validators mechanism.

    makes sure the content of a field is in line with the declared
    fieldtype

    Returns either a list of validators or a single (possibly wrapped)
    IS_IN_DB validator for reference fields.  Returns [] when the
    validators module is unavailable or the type is not a string.
    """
    db = field.db
    if not have_validators:
        return []
    field_type, field_length = field.type, field.length
    if isinstance(field_type, SQLCustomType):
        if hasattr(field_type, 'validator'):
            return field_type.validator
        else:
            field_type = field_type.type
    elif not isinstance(field_type,str):
        return []
    requires=[]
    # helper: render a referenced record through the table's _format
    # (string template or callable), falling back to the raw id
    def ff(r,id):
        row=r(id)
        if not row:
            return id
        elif hasattr(r, '_format') and isinstance(r._format,str):
            return r._format % row
        elif hasattr(r, '_format') and callable(r._format):
            return r._format(row)
        else:
            return id
    if field_type in (('string', 'text', 'password')):
        requires.append(validators.IS_LENGTH(field_length))
    elif field_type == 'json':
        requires.append(validators.IS_EMPTY_OR(validators.IS_JSON()))
    elif field_type == 'double' or field_type == 'float':
        requires.append(validators.IS_FLOAT_IN_RANGE(-1e100, 1e100))
    elif field_type in ('integer','bigint'):
        # NOTE(review): float bounds passed to an int-range validator;
        # presumably IS_INT_IN_RANGE tolerates them — confirm upstream.
        requires.append(validators.IS_INT_IN_RANGE(-1e100, 1e100))
    elif field_type.startswith('decimal'):
        requires.append(validators.IS_DECIMAL_IN_RANGE(-10**10, 10**10))
    elif field_type == 'date':
        requires.append(validators.IS_DATE())
    elif field_type == 'time':
        requires.append(validators.IS_TIME())
    elif field_type == 'datetime':
        requires.append(validators.IS_DATETIME())
    elif db and field_type.startswith('reference') and \
            field_type.find('.') < 0 and \
            field_type[10:] in db.tables:
        # 'reference <table>': field_type[10:] is the referenced table name
        referenced = db[field_type[10:]]
        def repr_ref(id, row=None, r=referenced, f=ff): return f(r, id)
        field.represent = field.represent or repr_ref
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format)
            if field.unique:
                requires._and = validators.IS_NOT_IN_DB(db,field)
            # self-reference: allow empty so the first record can be created
            if field.tablename == field_type[10:]:
                return validators.IS_EMPTY_OR(requires)
            return requires
    elif db and field_type.startswith('list:reference') and \
            field_type.find('.') < 0 and \
            field_type[15:] in db.tables:
        # 'list:reference <table>': field_type[15:] is the table name
        referenced = db[field_type[15:]]
        def list_ref_repr(ids, row=None, r=referenced, f=ff):
            if not ids:
                return None
            refs = None
            db, id = r._db, r._id
            if isinstance(db._adapter, GoogleDatastoreAdapter):
                # GAE limits belongs() to 30 items; batch and merge
                def count(values): return db(id.belongs(values)).select(id)
                rx = range(0, len(ids), 30)
                refs = reduce(lambda a,b:a&b, [count(ids[i:i+30]) for i in rx])
            else:
                refs = db(id.belongs(ids)).select(id)
            return (refs and ', '.join(str(f(r,x.id)) for x in refs) or '')
        field.represent = field.represent or list_ref_repr
        if hasattr(referenced, '_format') and referenced._format:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           referenced._format,multiple=True)
        else:
            requires = validators.IS_IN_DB(db,referenced._id,
                                           multiple=True)
        if field.unique:
            requires._and = validators.IS_NOT_IN_DB(db,field)
        return requires
    elif field_type.startswith('list:'):
        def repr_list(values,row=None): return', '.join(str(v) for v in (values or []))
        field.represent = field.represent or repr_list
    if field.unique:
        requires.insert(0,validators.IS_NOT_IN_DB(db,field))
    # sff: two-letter prefixes of types that may legitimately be empty
    # (integer, double, date, time, datetime, boolean, decimal, bigint...)
    sff = ['in', 'do', 'da', 'ti', 'de', 'bo']
    if field.notnull and not field_type[:2] in sff:
        requires.insert(0, validators.IS_NOT_EMPTY())
    elif not field.notnull and field_type[:2] in sff and requires:
        requires[-1] = validators.IS_EMPTY_OR(requires[-1])
    return requires
def bar_escape(item):
    """Escape an item for bar-encoding: coerce to str and double
    every '|' so it survives the '|'-delimited serialization.
    """
    text = str(item)
    return text.replace('|', '||')
6770
def bar_encode(items):
    """Serialize a sequence into web2py's bar-delimited list format
    ('|a|b|'); items whose str() is blank are skipped.
    """
    escaped = [bar_escape(item) for item in items if str(item).strip()]
    return '|%s|' % '|'.join(escaped)
6773
def bar_decode_integer(value):
    """Decode a bar-encoded list of integers ('|1|2|' -> [1, 2]).

    Accepts either a string or a file-like object (anything with
    .read() but no .split()), e.g. a DB blob wrapper.
    """
    if hasattr(value, 'read') and not hasattr(value, 'split'):
        value = value.read()
    chunks = value.split('|')
    return [int(chunk) for chunk in chunks if chunk.strip()]
6778
def bar_decode_string(value):
    # Decode a bar-encoded list of strings: strip the surrounding bars,
    # split on single '|' (REGEX_UNPACK), and un-double escaped '||'.
    return [x.replace('||', '|') for x in
            REGEX_UNPACK.split(value[1:-1]) if x.strip()]
6782
class Row(object):

    """
    a dictionary that lets you do d['a'] as well as d.a
    this is only used to store a Row
    """

    def __init__(self,*args,**kwargs):
        # Same signature as dict.update: a mapping/iterable and/or kwargs.
        self.__dict__.update(*args,**kwargs)

    def __getitem__(self, key):
        """Item access; 'table.field' keys are resolved through the
        nested sub-Row, with _extra columns checked first."""
        key=str(key)
        m = REGEX_TABLE_DOT_FIELD.match(key)
        if key in self.get('_extra',{}):
            return self._extra[key]
        elif m:
            try:
                return ogetattr(self, m.group(1))[m.group(2)]
            except (KeyError,AttributeError,TypeError):
                # fall back to the bare field name on this Row
                key = m.group(2)
        return ogetattr(self, key)

    def __setitem__(self, key, value):
        setattr(self, str(key), value)

    __delitem__ = delattr

    # NOTE(review): this lambda is shadowed by the def __copy__ further
    # down; kept for fidelity with the original source.
    __copy__ = lambda self: Row(self)

    __call__ = __getitem__

    def get(self,key,default=None):
        return self.__dict__.get(key,default)

    def __contains__(self,key):
        return key in self.__dict__

    has_key = __contains__

    def __nonzero__(self):
        # Python 2 truthiness: an empty Row is falsy.
        return len(self.__dict__)>0

    def update(self, *args, **kwargs):
        self.__dict__.update(*args, **kwargs)

    def keys(self):
        return self.__dict__.keys()

    def items(self):
        return self.__dict__.items()

    def values(self):
        return self.__dict__.values()

    def __iter__(self):
        return self.__dict__.__iter__()

    def iteritems(self):
        return self.__dict__.iteritems()

    def __str__(self):
        ### this could be made smarter
        return '<Row %s>' % self.as_dict()

    def __repr__(self):
        return '<Row %s>' % self.as_dict()

    def __int__(self):
        # bypass __getattr__-style tricks; read the raw 'id' attribute
        return object.__getattribute__(self,'id')

    def __eq__(self,other):
        try:
            return self.as_dict() == other.as_dict()
        except AttributeError:
            return False

    def __ne__(self,other):
        return not (self == other)

    def __copy__(self):
        return Row(dict(self))

    def as_dict(self, datetime_to_str=False, custom_types=None):
        """Return a plain dict copy, recursing into nested Rows,
        converting References to int and Decimals to float, and
        dropping values that are not JSON/pickle-friendly types.
        """
        SERIALIZABLE_TYPES = [str, unicode, int, long, float, bool, list, dict]
        if isinstance(custom_types,(list,tuple,set)):
            SERIALIZABLE_TYPES += custom_types
        elif custom_types:
            SERIALIZABLE_TYPES.append(custom_types)
        d = dict(self)
        for k in copy.copy(d.keys()):
            v=d[k]
            if d[k] is None:
                continue
            elif isinstance(v,Row):
                d[k]=v.as_dict()
            elif isinstance(v,Reference):
                d[k]=int(v)
            elif isinstance(v,decimal.Decimal):
                d[k]=float(v)
            elif isinstance(v, (datetime.date, datetime.datetime, datetime.time)):
                if datetime_to_str:
                    # 'YYYY-MM-DD HH:MM:SS' (isoformat with T replaced)
                    d[k] = v.isoformat().replace('T',' ')[:19]
            elif not isinstance(v,tuple(SERIALIZABLE_TYPES)):
                del d[k]
        return d

    def as_xml(self, row_name="row", colnames=None, indent='  '):
        """Serialize the Row to an XML fragment; nested Rows become
        nested elements, non-identifier keys become <extra> elements,
        callables are skipped."""
        def f(row,field,indent='  '):
            if isinstance(row,Row):
                spc = indent+'  \n'
                items = [f(row[x],x,indent+'  ') for x in row]
                return '%s<%s>\n%s\n%s</%s>' % (
                    indent,
                    field,
                    spc.join(item for item in items if item),
                    indent,
                    field)
            elif not callable(row):
                if REGEX_ALPHANUMERIC.match(field):
                    return '%s<%s>%s</%s>' % (indent,field,row,field)
                else:
                    return '%s<extra name="%s">%s</extra>' % \
                        (indent,field,row)
            else:
                return None
        return f(self, row_name, indent=indent)

    def as_json(self, mode="object", default=None, colnames=None,
                serialize=True, **kwargs):
        """
        serializes the table to a JSON list of objects
        kwargs are passed to .as_dict method
        only "object" mode supported for single row

        serialize = False used by Rows.as_json
        TODO: return array mode with query column order
        """

        def inner_loop(record, col):
            (t, f) = col.split('.')
            res = None
            if not REGEX_TABLE_DOT_FIELD.match(col):
                key = col
                res = record._extra[col]
            else:
                key = f
                if isinstance(record.get(t, None), Row):
                    res = record[t][f]
                else:
                    res = record[f]
            if mode == 'object':
                return (key, res)
            else:
                return res

        # multi: this Row wraps per-table sub-Rows (joined select)
        multi = any([isinstance(v, self.__class__) for v in self.values()])
        mode = mode.lower()
        if not mode in ['object', 'array']:
            raise SyntaxError('Invalid JSON serialization mode: %s' % mode)

        if mode=='object' and colnames:
            item = dict([inner_loop(self, col) for col in colnames])
        elif colnames:
            item = [inner_loop(self, col) for col in colnames]
        else:
            if not mode == 'object':
                raise SyntaxError('Invalid JSON serialization mode: %s' % mode)

            if multi:
                item = dict()
                [item.update(**v.as_dict(**kwargs)) for v in self.values()]
            else:
                item = self.as_dict(**kwargs)

        if serialize:
            if have_serializers:
                return serializers.json(item,
                                        default=default or
                                        serializers.custom_json)
            elif simplejson:
                return simplejson.dumps(item)
            else:
                raise RuntimeError("missing simplejson")
        else:
            return item
################################################################################
# Everything below should be independent of the specifics of the database
# and should work for RDBMs and some NoSQL databases
################################################################################

class SQLCallableList(list):
    """A list that is also callable: invoking it returns a shallow
    copy, so callers get a snapshot they can mutate freely (used for
    db.tables())."""

    def __call__(self):
        snapshot = copy.copy(self)
        return snapshot
6979
def smart_query(fields,text):
    """Parse a natural-language-ish search string into a DAL Query.

    `fields` is a Field, Table, or list of either; `text` is e.g.
    "name contains 'john' and age > 18".  Quoted literals are
    protected as '#N' placeholders, verbose operators are normalized
    to symbols, then a small field/op/value state machine builds the
    query.  Raises RuntimeError on invalid syntax or operations.
    """
    if not isinstance(fields,(list,tuple)):
        fields = [fields]
    # flatten Tables into their Fields
    new_fields = []
    for field in fields:
        if isinstance(field,Field):
            new_fields.append(field)
        elif isinstance(field,Table):
            for ofield in field:
                new_fields.append(ofield)
        else:
            raise RuntimeError("fields must be a list of fields")
    fields = new_fields
    # map both 'name' and 'table.name' (lowercased) to the Field;
    # first occurrence wins on collision
    field_map = {}
    for field in fields:
        n = field.name.lower()
        if not n in field_map:
            field_map[n] = field
        n = str(field).lower()
        if not n in field_map:
            field_map[n] = field
    # replace quoted string constants with '#N' placeholders so the
    # operator rewriting below cannot touch their contents
    constants = {}
    i = 0
    while True:
        m = REGEX_CONST_STRING.search(text)
        if not m: break
        text = text[:m.start()]+('#%i' % i)+text[m.end():]
        constants[str(i)] = m.group()[1:-1]
        i+=1
    text = re.sub('\s+',' ',text).lower()
    # normalize verbose/symbolic operators; entries with a leading
    # space also get an ' is'-prefixed variant rewritten first
    for a,b in [('&','and'),
                ('|','or'),
                ('~','not'),
                ('==','='),
                ('<','<'),
                ('>','>'),
                ('<=','<='),
                ('>=','>='),
                ('<>','!='),
                ('=<','<='),
                ('=>','>='),
                ('=','='),
                (' less or equal than ','<='),
                (' greater or equal than ','>='),
                (' equal or less than ','<='),
                (' equal or greater than ','>='),
                (' less or equal ','<='),
                (' greater or equal ','>='),
                (' equal or less ','<='),
                (' equal or greater ','>='),
                (' not equal to ','!='),
                (' not equal ','!='),
                (' equal to ','='),
                (' equal ','='),
                (' equals ','='),
                (' less than ','<'),
                (' greater than ','>'),
                (' starts with ','startswith'),
                (' ends with ','endswith'),
                (' not in ' , 'notbelongs'),
                (' in ' , 'belongs'),
                (' is ','=')]:
        if a[0]==' ':
            text = text.replace(' is'+a,' %s ' % b)
        text = text.replace(a,' %s ' % b)
    text = re.sub('\s+',' ',text).lower()
    # re-fuse two-char comparison operators split by the rewriting
    text = re.sub('(?P<a>[\<\>\!\=])\s+(?P<b>[\<\>\!\=])','\g<a>\g<b>',text)
    # state machine: expect field -> operator -> value, combining
    # successive clauses with the pending and/or/not modifiers
    query = field = neg = op = logic = None
    for item in text.split():
        if field is None:
            if item == 'not':
                neg = True
            elif not neg and not logic and item in ('and','or'):
                logic = item
            elif item in field_map:
                field = field_map[item]
            else:
                raise RuntimeError("Invalid syntax")
        elif not field is None and op is None:
            op = item
        elif not op is None:
            if item.startswith('#'):
                if not item[1:] in constants:
                    raise RuntimeError("Invalid syntax")
                value = constants[item[1:]]
            else:
                value = item
                # unquoted '=' on text-like fields degrades to 'like'
                if field.type in ('text', 'string', 'json'):
                    if op == '=': op = 'like'
            if op == '=': new_query = field==value
            elif op == '<': new_query = field<value
            elif op == '>': new_query = field>value
            elif op == '<=': new_query = field<=value
            elif op == '>=': new_query = field>=value
            elif op == '!=': new_query = field!=value
            elif op == 'belongs': new_query = field.belongs(value.split(','))
            elif op == 'notbelongs': new_query = ~field.belongs(value.split(','))
            elif field.type in ('text', 'string', 'json'):
                if op == 'contains': new_query = field.contains(value)
                elif op == 'like': new_query = field.like(value)
                elif op == 'startswith': new_query = field.startswith(value)
                elif op == 'endswith': new_query = field.endswith(value)
                else: raise RuntimeError("Invalid operation")
            elif field._db._adapter.dbengine=='google:datastore' and \
                 field.type in ('list:integer', 'list:string', 'list:reference'):
                if op == 'contains': new_query = field.contains(value)
                else: raise RuntimeError("Invalid operation")
            else: raise RuntimeError("Invalid operation")
            if neg: new_query = ~new_query
            if query is None:
                query = new_query
            elif logic == 'and':
                query &= new_query
            elif logic == 'or':
                query |= new_query
            field = op = neg = logic = None
    return query
7097
7098 -class DAL(object):
7099 7100 """ 7101 an instance of this class represents a database connection 7102 7103 Example:: 7104 7105 db = DAL('sqlite://test.db') 7106 7107 or 7108 7109 db = DAL({"uri": ..., "items": ...}) # experimental 7110 7111 db.define_table('tablename', Field('fieldname1'), 7112 Field('fieldname2')) 7113 """ 7114
def __new__(cls, uri='sqlite://dummy.db', *args, **kwargs):
    """Thread-local instance bookkeeping.

    Instances are grouped per db_uid (an md5 of the uri unless given).
    The special uri '<zombie>' re-attaches to an existing instance of
    this thread (or parks a fresh one in the zombie registry) without
    creating a new connection.
    """
    if not hasattr(THREAD_LOCAL,'db_instances'):
        THREAD_LOCAL.db_instances = {}
    if not hasattr(THREAD_LOCAL,'db_instances_zombie'):
        THREAD_LOCAL.db_instances_zombie = {}
    if uri == '<zombie>':
        db_uid = kwargs['db_uid'] # a zombie must have a db_uid!
        if db_uid in THREAD_LOCAL.db_instances:
            db_group = THREAD_LOCAL.db_instances[db_uid]
            db = db_group[-1]
        elif db_uid in THREAD_LOCAL.db_instances_zombie:
            db = THREAD_LOCAL.db_instances_zombie[db_uid]
        else:
            db = super(DAL, cls).__new__(cls)
            THREAD_LOCAL.db_instances_zombie[db_uid] = db
    else:
        db_uid = kwargs.get('db_uid',hashlib_md5(repr(uri)).hexdigest())
        if db_uid in THREAD_LOCAL.db_instances_zombie:
            # resurrect a parked zombie instead of allocating
            db = THREAD_LOCAL.db_instances_zombie[db_uid]
            del THREAD_LOCAL.db_instances_zombie[db_uid]
        else:
            db = super(DAL, cls).__new__(cls)
        db_group = THREAD_LOCAL.db_instances.get(db_uid,[])
        db_group.append(db)
        THREAD_LOCAL.db_instances[db_uid] = db_group
    db._db_uid = db_uid
    return db
@staticmethod
def set_folder(folder):
    """
    # ## this allows gluon to set a folder for this thread
    # ## <<<<<<<<< Should go away as new DAL replaces old sql.py
    """
    # Delegates to the adapter base class, which stores the folder
    # (thread-wide) for .table migration files.
    BaseAdapter.set_folder(folder)
@staticmethod
def get_instances():
    """
    Returns a dictionary with uri as key with timings and defined tables
    {'sqlite://storage.sqlite': {
        'dbstats': [(select auth_user.email from auth_user, 0.02009)],
        'dbtables': {
            'defined': ['auth_cas', 'auth_event', 'auth_group',
                'auth_membership', 'auth_permission', 'auth_user'],
            'lazy': '[]'
            }
        }
    }
    """
    dbs = getattr(THREAD_LOCAL,'db_instances',{}).items()
    infos = {}
    for db_uid, db_group in dbs:
        for db in db_group:
            # skip the dummy/disconnected DAL (no uri)
            if not db._uri:
                continue
            k = hide_password(db._uri)
            infos[k] = dict(dbstats = [(row[0], row[1]) for row in db._timings],
                            dbtables = {'defined':
                                sorted(list(set(db.tables) -
                                            set(db._LAZY_TABLES.keys()))),
                                'lazy': sorted(db._LAZY_TABLES.keys())}
                            )
    return infos
7179 7180 @staticmethod
7181 - def distributed_transaction_begin(*instances):
7182 if not instances: 7183 return 7184 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7185 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7186 instances = enumerate(instances) 7187 for (i, db) in instances: 7188 if not db._adapter.support_distributed_transaction(): 7189 raise SyntaxError( 7190 'distributed transaction not suported by %s' % db._dbname) 7191 for (i, db) in instances: 7192 db._adapter.distributed_transaction_begin(keys[i])
7193 7194 @staticmethod
7195 - def distributed_transaction_commit(*instances):
7196 if not instances: 7197 return 7198 instances = enumerate(instances) 7199 thread_key = '%s.%s' % (socket.gethostname(), threading.currentThread()) 7200 keys = ['%s.%i' % (thread_key, i) for (i,db) in instances] 7201 for (i, db) in instances: 7202 if not db._adapter.support_distributed_transaction(): 7203 raise SyntaxError( 7204 'distributed transaction not suported by %s' % db._dbanme) 7205 try: 7206 for (i, db) in instances: 7207 db._adapter.prepare(keys[i]) 7208 except: 7209 for (i, db) in instances: 7210 db._adapter.rollback_prepared(keys[i]) 7211 raise RuntimeError('failure to commit distributed transaction') 7212 else: 7213 for (i, db) in instances: 7214 db._adapter.commit_prepared(keys[i]) 7215 return
7216
def __init__(self, uri=DEFAULT_URI,
             pool_size=0, folder=None,
             db_codec='UTF-8', check_reserved=None,
             migrate=True, fake_migrate=False,
             migrate_enabled=True, fake_migrate_all=False,
             decode_credentials=False, driver_args=None,
             adapter_args=None, attempts=5, auto_import=False,
             bigint_id=False,debug=False,lazy_tables=False,
             db_uid=None, do_connect=True, after_connection=None):
    """
    Creates a new Database Abstraction Layer instance.

    Keyword arguments:

    :uri: string that contains information for connecting to a database.
           (default: 'sqlite://dummy.db')

            experimental: you can specify a dictionary as uri
            parameter i.e. with
            db = DAL({"uri": "sqlite://storage.sqlite",
                      "items": {...}, ...})

            for an example of dict input you can check the output
            of the scaffolding db model with

            db.as_dict()

            Note that for compatibility with Python older than
            version 2.6.5 you should cast your dict input keys
            to str due to a syntax limitation on kwarg names.
            for proper DAL dictionary input you can use one of:

            obj = serializers.cast_keys(dict, [encoding="utf-8"])

            or else (for parsing json input)

            obj = serializers.loads_json(data, unicode_keys=False)

    :pool_size: How many open connections to make to the database object.
    :folder: where .table files will be created.
             automatically set within web2py
             use an explicit path when using DAL outside web2py
    :db_codec: string encoding of the database (default: 'UTF-8')
    :check_reserved: list of adapters to check tablenames and column names
                     against sql/nosql reserved keywords. (Default None)

    * 'common' List of sql keywords that are common to all database types
            such as "SELECT, INSERT". (recommended)
    * 'all' Checks against all known SQL keywords. (not recommended)
            <adaptername> Checks against the specific adapters list of keywords
            (recommended)
    * '<adaptername>_nonreserved' Checks against the specific adapters
            list of nonreserved keywords. (if available)
    :migrate (defaults to True) sets default migrate behavior for all tables
    :fake_migrate (defaults to False) sets default fake_migrate behavior for all tables
    :migrate_enabled (defaults to True). If set to False disables ALL migrations
    :fake_migrate_all (defaults to False). If sets to True fake migrates ALL tables
    :attempts (defaults to 5). Number of times to attempt connecting
    :auto_import (defaults to False). If set, import automatically table definitions from the
             databases folder
    :bigint_id (defaults to False): If set, turn on bigint instead of int for id fields
    :lazy_tables (defaults to False): delay table definition until table access
    :after_connection (defaults to None): a callable that will be execute after the connection
    """

    items = None
    # experimental dict-uri form: unpack uri/items before connecting
    if isinstance(uri, dict):
        if "items" in uri:
            items = uri.pop("items")
        try:
            newuri = uri.pop("uri")
        except KeyError:
            newuri = DEFAULT_URI
        # NOTE(review): locals().update() does not rebind function
        # locals in CPython; presumably these extra keys are ignored.
        locals().update(uri)
        uri = newuri

    # a resurrected zombie (see __new__) is already initialized
    if uri == '<zombie>' and db_uid is not None: return
    if not decode_credentials:
        credential_decoder = lambda cred: cred
    else:
        credential_decoder = lambda cred: urllib.unquote(cred)
    self._folder = folder
    if folder:
        self.set_folder(folder)
    self._uri = uri
    self._pool_size = pool_size
    self._db_codec = db_codec
    self._lastsql = ''
    self._timings = []
    self._pending_references = {}
    self._request_tenant = 'request_tenant'
    self._common_fields = []
    self._referee_name = '%(table)s'
    self._bigint_id = bigint_id
    self._debug = debug
    self._migrated = []
    self._LAZY_TABLES = {}
    self._lazy_tables = lazy_tables
    self._tables = SQLCallableList()
    self._driver_args = driver_args
    self._adapter_args = adapter_args
    self._check_reserved = check_reserved
    self._decode_credentials = decode_credentials
    self._attempts = attempts
    self._do_connect = do_connect

    if not str(attempts).isdigit() or attempts < 0:
        attempts = 5
    if uri:
        # retry loop: each attempt tries every uri in order until one
        # adapter connects; SyntaxError (bad uri) aborts immediately
        uris = isinstance(uri,(list,tuple)) and uri or [uri]
        error = ''
        connected = False
        for k in range(attempts):
            for uri in uris:
                try:
                    if is_jdbc and not uri.startswith('jdbc:'):
                        uri = 'jdbc:'+uri
                    self._dbname = REGEX_DBNAME.match(uri).group()
                    if not self._dbname in ADAPTERS:
                        raise SyntaxError("Error in URI '%s' or database not supported" % self._dbname)
                    # notice that driver args or {} else driver_args
                    # defaults to {} global, not correct
                    kwargs = dict(db=self,uri=uri,
                                  pool_size=pool_size,
                                  folder=folder,
                                  db_codec=db_codec,
                                  credential_decoder=credential_decoder,
                                  driver_args=driver_args or {},
                                  adapter_args=adapter_args or {},
                                  do_connect=do_connect,
                                  after_connection=after_connection)
                    self._adapter = ADAPTERS[self._dbname](**kwargs)
                    types = ADAPTERS[self._dbname].types
                    # copy so multiple DAL() possible
                    self._adapter.types = copy.copy(types)
                    if bigint_id:
                        if 'big-id' in types and 'reference' in types:
                            self._adapter.types['id'] = types['big-id']
                            self._adapter.types['reference'] = types['big-reference']
                    connected = True
                    break
                except SyntaxError:
                    raise
                except Exception:
                    tb = traceback.format_exc()
                    sys.stderr.write('DEBUG: connect attempt %i, connection error:\n%s' % (k, tb))
            if connected:
                break
            else:
                time.sleep(1)
        if not connected:
            raise RuntimeError("Failure to connect, tried %d times:\n%s" % (attempts, tb))
    else:
        # no uri: dummy adapter, migrations disabled
        self._adapter = BaseAdapter(db=self,pool_size=0,
                                    uri='None',folder=folder,
                                    db_codec=db_codec, after_connection=after_connection)
        migrate = fake_migrate = False
    adapter = self._adapter
    self._uri_hash = hashlib_md5(adapter.uri).hexdigest()
    self.check_reserved = check_reserved
    if self.check_reserved:
        from reserved_sql_keywords import ADAPTERS as RSK
        self.RSK = RSK
    self._migrate = migrate
    self._fake_migrate = fake_migrate
    self._migrate_enabled = migrate_enabled
    self._fake_migrate_all = fake_migrate_all
    if auto_import or items:
        self.import_table_definitions(adapter.folder,
                                      items=items)
7387 7388 @property
7389 - def tables(self):
7390 return self._tables
7391
    def import_table_definitions(self, path, migrate=False,
                                 fake_migrate=False, items=None):
        """
        Define tables on this DAL from saved metadata.

        Two sources are supported:
        - ``items``: a dict of serialized table definitions (as produced by
          ``DAL.as_dict``); unsupported Table/Field options are stripped
          before calling ``define_table``.
        - otherwise: the pickled ``<uri_hash>_*.table`` migration files
          found under ``path``.
        """
        # migration files for this connection share the URI-hash prefix
        pattern = pjoin(path, self._uri_hash + '_*.table')
        if items:
            for tablename, table in items.iteritems():
                # TODO: read all field/table options
                fields = []
                # remove unsupported/illegal Table arguments
                [table.pop(name) for name in ("name", "fields") if
                 name in table]
                if "items" in table:
                    for fieldname, field in table.pop("items").iteritems():
                        # remove unsupported/illegal Field arguments
                        [field.pop(key) for key in ("requires", "name",
                                                    "compute", "colname") if key in field]
                        fields.append(Field(str(fieldname), **field))
                self.define_table(str(tablename), *fields, **table)
        else:
            for filename in glob.glob(pattern):
                tfile = self._adapter.file_open(filename, 'r')
                try:
                    # each .table file is a pickled {fieldname: props} dict
                    sql_fields = pickle.load(tfile)
                    # strip "<uri_hash>_" prefix and ".table" suffix to get the name
                    name = filename[len(pattern) - 7:-6]
                    # keep the original column order via the 'sortable' index
                    mf = [(value['sortable'],
                           Field(key,
                                 type=value['type'],
                                 length=value.get('length', None),
                                 notnull=value.get('notnull', False),
                                 unique=value.get('unique', False))) \
                              for key, value in sql_fields.iteritems()]
                    mf.sort(lambda a, b: cmp(a[0], b[0]))
                    self.define_table(name, *[item[1] for item in mf],
                                      **dict(migrate=migrate,
                                             fake_migrate=fake_migrate))
                finally:
                    self._adapter.file_close(tfile)
7428
7429 - def check_reserved_keyword(self, name):
7430 """ 7431 Validates ``name`` against SQL keywords 7432 Uses self.check_reserve which is a list of 7433 operators to use. 7434 self.check_reserved 7435 ['common', 'postgres', 'mysql'] 7436 self.check_reserved 7437 ['all'] 7438 """ 7439 for backend in self.check_reserved: 7440 if name.upper() in self.RSK[backend]: 7441 raise SyntaxError( 7442 'invalid table/column name "%s" is a "%s" reserved SQL/NOSQL keyword' % (name, backend.upper()))
7443
    def parse_as_rest(self, patterns, args, vars, queries=None, nested_select=True):
        """
        Maps RESTful URL args/vars onto database queries.

        Returns a Row with keys: status (HTTP-like code), response,
        pattern, error, and (on full matches) count.

        EXAMPLE:

        db.define_table('person',Field('name'),Field('info'))
        db.define_table('pet',Field('ownedby',db.person),Field('name'),Field('info'))

        @request.restful()
        def index():
            def GET(*args,**vars):
                patterns = [
                    "/friends[person]",
                    "/{person.name}/:field",
                    "/{person.name}/pets[pet.ownedby]",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}",
                    "/{person.name}/pets[pet.ownedby]/{pet.name}/:field",
                    ("/dogs[pet]", db.pet.info=='dog'),
                    ("/dogs[pet]/{pet.name.startswith}", db.pet.info=='dog'),
                    ]
                parser = db.parse_as_rest(patterns,args,vars)
                if parser.status == 200:
                    return dict(content=parser.response)
                else:
                    raise HTTP(parser.status,parser.error)

            def POST(table_name,**vars):
                if table_name == 'person':
                    return db.person.validate_and_insert(**vars)
                elif table_name == 'pet':
                    return db.pet.validate_and_insert(**vars)
                else:
                    raise HTTP(400)
            return locals()
        """

        db = self
        # re1 matches "{table.field...}" tags, re2 matches "name[table.field]" tags
        re1 = REGEX_SEARCH_PATTERN
        re2 = REGEX_SQUARE_BRACKETS

        def auto_table(table, base='', depth=0):
            # Generate patterns for every readable field of `table`,
            # recursing into referencing tables up to `depth` levels.
            patterns = []
            for field in db[table].fields:
                if base:
                    tag = '%s/%s' % (base, field.replace('_', '-'))
                else:
                    tag = '/%s/%s' % (table.replace('_', '-'), field.replace('_', '-'))
                f = db[table][field]
                if not f.readable: continue
                if f.type == 'id' or 'slug' in field or f.type.startswith('reference'):
                    tag += '/{%s.%s}' % (table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                elif f.type.startswith('boolean'):
                    tag += '/{%s.%s}' % (table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                elif f.type in ('float', 'double', 'integer', 'bigint'):
                    # numeric fields are exposed as a half-open range [ge, lt)
                    tag += '/{%s.%s.ge}/{%s.%s.lt}' % (table, field, table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                elif f.type.startswith('list:'):
                    tag += '/{%s.%s.contains}' % (table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                elif f.type in ('date', 'datetime'):
                    # drill-down: year, then year/month, then year/month/day
                    tag += '/{%s.%s.year}' % (table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                    tag += '/{%s.%s.month}' % (table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                    tag += '/{%s.%s.day}' % (table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                if f.type in ('datetime', 'time'):
                    # additional drill-down into hour/minute/second
                    tag += '/{%s.%s.hour}' % (table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                    tag += '/{%s.%s.minute}' % (table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                    tag += '/{%s.%s.second}' % (table, field)
                    patterns.append(tag)
                    patterns.append(tag + '/:field')
                if depth > 0:
                    # follow incoming references one level deeper
                    for f in db[table]._referenced_by:
                        tag += '/%s[%s.%s]' % (table, f.tablename, f.name)
                        patterns.append(tag)
                        patterns += auto_table(table, base=tag, depth=depth - 1)
            return patterns

        if patterns == 'auto':
            # build patterns for every non-auth table
            patterns = []
            for table in db.tables:
                if not table.startswith('auth_'):
                    patterns.append('/%s[%s]' % (table, table))
                    patterns += auto_table(table, base='', depth=1)
        else:
            # expand any ":auto[...]" tail into generated sub-patterns in place
            i = 0
            while i < len(patterns):
                pattern = patterns[i]
                if not isinstance(pattern, str):
                    pattern = pattern[0]
                tokens = pattern.split('/')
                if tokens[-1].startswith(':auto') and re2.match(tokens[-1]):
                    new_patterns = auto_table(tokens[-1][tokens[-1].find('[') + 1:-1],
                                              '/'.join(tokens[:-1]))
                    patterns = patterns[:i] + new_patterns + patterns[i + 1:]
                    i += len(new_patterns)
                else:
                    i += 1
        if '/'.join(args) == 'patterns':
            # introspection endpoint: list the active patterns
            return Row({'status':200, 'pattern':'list',
                        'error':None, 'response':patterns})
        for pattern in patterns:
            basequery, exposedfields = None, []
            # a pattern may be (pattern, basequery[, exposedfields])
            if isinstance(pattern, tuple):
                if len(pattern) == 2:
                    pattern, basequery = pattern
                elif len(pattern) > 2:
                    pattern, basequery, exposedfields = pattern[0:3]
            otable = table = None
            if not isinstance(queries, dict):
                dbset = db(queries)
                if basequery is not None:
                    dbset = dbset(basequery)
            i = 0
            tags = pattern[1:].split('/')
            # a pattern only matches when it consumes all URL args
            if len(tags) != len(args):
                continue
            for tag in tags:
                if re1.match(tag):
                    # "{table.field.op[.not]}" tag: build a filter from args[i]
                    tokens = tag[1:-1].split('.')
                    table, field = tokens[0], tokens[1]
                    if not otable or table == otable:
                        if len(tokens) == 2 or tokens[2] == 'eq':
                            query = db[table][field] == args[i]
                        elif tokens[2] == 'ne':
                            query = db[table][field] != args[i]
                        elif tokens[2] == 'lt':
                            query = db[table][field] < args[i]
                        elif tokens[2] == 'gt':
                            query = db[table][field] > args[i]
                        elif tokens[2] == 'ge':
                            query = db[table][field] >= args[i]
                        elif tokens[2] == 'le':
                            query = db[table][field] <= args[i]
                        elif tokens[2] == 'year':
                            query = db[table][field].year() == args[i]
                        elif tokens[2] == 'month':
                            query = db[table][field].month() == args[i]
                        elif tokens[2] == 'day':
                            query = db[table][field].day() == args[i]
                        elif tokens[2] == 'hour':
                            query = db[table][field].hour() == args[i]
                        elif tokens[2] == 'minute':
                            query = db[table][field].minutes() == args[i]
                        elif tokens[2] == 'second':
                            query = db[table][field].seconds() == args[i]
                        elif tokens[2] == 'startswith':
                            query = db[table][field].startswith(args[i])
                        elif tokens[2] == 'contains':
                            query = db[table][field].contains(args[i])
                        else:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        # optional 4th token negates the query
                        if len(tokens) == 4 and tokens[3] == 'not':
                            query = ~query
                        elif len(tokens) >= 4:
                            raise RuntimeError("invalid pattern: %s" % pattern)
                        if not otable and isinstance(queries, dict):
                            # per-table base queries supplied as a dict
                            dbset = db(queries[table])
                            if basequery is not None:
                                dbset = dbset(basequery)
                        dbset = dbset(query)
                    else:
                        raise RuntimeError("missing relation in pattern: %s" % pattern)
                elif re2.match(tag) and args[i] == tag[:tag.find('[')]:
                    # "name[table.field]" tag: hop to a referencing table
                    ref = tag[tag.find('[') + 1:-1]
                    if '.' in ref and otable:
                        table, field = ref.split('.')
                        selfld = '_id'
                        if db[table][field].type.startswith('reference '):
                            refs = [x.name for x in db[otable] if x.type == db[table][field].type]
                        else:
                            refs = [x.name for x in db[table]._referenced_by if x.tablename == otable]
                        if refs:
                            selfld = refs[0]
                        if nested_select:
                            # use a nested SELECT if the backend supports it
                            try:
                                dbset = db(db[table][field].belongs(dbset._select(db[otable][selfld])))
                            except ValueError:
                                return Row({'status':400, 'pattern':pattern,
                                            'error':'invalid path', 'response':None})
                        else:
                            items = [item.id for item in dbset.select(db[otable][selfld])]
                            dbset = db(db[table][field].belongs(items))
                    else:
                        table = ref
                        if not otable and isinstance(queries, dict):
                            dbset = db(queries[table])
                        dbset = dbset(db[table])
                elif tag == ':field' and table:
                    # ":field" tag: return the values of a single column
                    field = args[i]
                    if not field in db[table]: break
                    # hand-built patterns should respect .readable=False as well
                    if not db[table][field].readable:
                        return Row({'status':418, 'pattern':pattern,
                                    'error':'I\'m a teapot', 'response':None})
                    try:
                        distinct = vars.get('distinct', False) == 'True'
                        offset = int(vars.get('offset', None) or 0)
                        limits = (offset, int(vars.get('limit', None) or 1000) + offset)
                    except ValueError:
                        return Row({'status':400, 'error':'invalid limits', 'response':None})
                    items = dbset.select(db[table][field], distinct=distinct, limitby=limits)
                    if items:
                        return Row({'status':200, 'response':items,
                                    'pattern':pattern})
                    else:
                        return Row({'status':404, 'pattern':pattern,
                                    'error':'no record found', 'response':None})
                elif tag != args[i]:
                    # literal segment mismatch: try the next pattern
                    break
                otable = table
                i += 1
            if i == len(tags) and table:
                # full match: select the records with ordering and paging
                ofields = vars.get('order', db[table]._id.name).split('|')
                try:
                    orderby = [db[table][f] if not f.startswith('~') else ~db[table][f[1:]] for f in ofields]
                except (KeyError, AttributeError):
                    return Row({'status':400, 'error':'invalid orderby', 'response':None})
                if exposedfields:
                    fields = [field for field in db[table] if str(field).split('.')[-1] in exposedfields and field.readable]
                else:
                    fields = [field for field in db[table] if field.readable]
                count = dbset.count()
                try:
                    offset = int(vars.get('offset', None) or 0)
                    limits = (offset, int(vars.get('limit', None) or 1000) + offset)
                except ValueError:
                    return Row({'status':400, 'error':'invalid limits', 'response':None})
                if count > limits[1] - limits[0]:
                    return Row({'status':400, 'error':'too many records', 'response':None})
                try:
                    response = dbset.select(limitby=limits, orderby=orderby, *fields)
                except ValueError:
                    return Row({'status':400, 'pattern':pattern,
                                'error':'invalid path', 'response':None})
                return Row({'status':200, 'response':response,
                            'pattern':pattern, 'count':count})
        return Row({'status':400, 'error':'no matching pattern', 'response':None})
7697
    def define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Define (or lazily register) a table on this DAL and return it.

        Validates the name against python keywords, reserved SQL keywords
        (when ``check_reserved`` is enabled) and the set of allowed table
        arguments (TABLE_ARGS). Redefining an existing table requires
        ``redefine=True``. When ``lazy_tables`` is on, the definition is
        stored and the actual Table is built on first access; returns None
        in that case.
        """
        if not isinstance(tablename, str):
            raise SyntaxError("missing table name")
        elif hasattr(self, tablename) or tablename in self.tables:
            # already defined: only allowed with redefine=True
            if not args.get('redefine', False):
                raise SyntaxError('table already defined: %s' % tablename)
        elif tablename.startswith('_') or hasattr(self, tablename) or \
                REGEX_PYTHON_KEYWORDS.match(tablename):
            raise SyntaxError('invalid table name: %s' % tablename)
        elif self.check_reserved:
            self.check_reserved_keyword(tablename)
        else:
            # reject unknown Table keyword arguments
            invalid_args = set(args) - TABLE_ARGS
            if invalid_args:
                raise SyntaxError('invalid table "%s" attributes: %s' \
                    % (tablename, invalid_args))
        if self._lazy_tables and not tablename in self._LAZY_TABLES:
            # defer construction; __getattr__ triggers lazy_define_table later
            self._LAZY_TABLES[tablename] = (tablename, fields, args)
            table = None
        else:
            table = self.lazy_define_table(tablename, *fields, **args)
        if not tablename in self.tables:
            self.tables.append(tablename)
        return table
7727
    def lazy_define_table(
        self,
        tablename,
        *fields,
        **args
        ):
        """
        Actually construct a Table object (called by define_table, either
        immediately or on first access when lazy_tables is enabled).

        Appends any DAL-level common fields, wires up references, assigns
        default validators, and runs the migration (create/alter) under the
        global lock when migrations are enabled.
        """
        args_get = args.get
        common_fields = self._common_fields
        if common_fields:
            fields = list(fields) + list(common_fields)

        table_class = args_get('table_class', Table)
        table = table_class(self, tablename, *fields, **args)
        table._actual = True
        self[tablename] = table
        # must follow above line to handle self references
        table._create_references()
        for field in table:
            if field.requires == DEFAULT:
                field.requires = sqlhtml_validators(field)

        migrate = self._migrate_enabled and args_get('migrate', self._migrate)
        if migrate and not self._uri in (None, 'None') \
                or self._adapter.dbengine == 'google:datastore':
            fake_migrate = self._fake_migrate_all or \
                args_get('fake_migrate', self._fake_migrate)
            polymodel = args_get('polymodel', None)
            try:
                # serialize DDL across threads
                GLOBAL_LOCKER.acquire()
                self._lastsql = self._adapter.create_table(
                    table, migrate=migrate,
                    fake_migrate=fake_migrate,
                    polymodel=polymodel)
            finally:
                GLOBAL_LOCKER.release()
        else:
            table._dbt = None
        on_define = args_get('on_define', None)
        if on_define: on_define(table)
        return table
7768
    def as_dict(self, flat=False, sanitize=True, field_options=True):
        """
        Serialize this DAL's configuration and table definitions to a dict.

        With ``sanitize=True`` (default) the connection uri, dbname and
        db_uid are omitted (set to None). The result contains the private
        configuration attributes plus per-table dicts under "items".
        """
        dbname = db_uid = uri = None
        if not sanitize:
            # only expose connection details when explicitly requested
            uri, dbname, db_uid = (self._uri, self._dbname, self._db_uid)
        db_as_dict = dict(items={}, tables=[], uri=uri, dbname=dbname,
                          db_uid=db_uid,
                          **dict([(k, getattr(self, "_" + k)) for
                                  k in 'pool_size','folder','db_codec',
                                  'check_reserved','migrate','fake_migrate',
                                  'migrate_enabled','fake_migrate_all',
                                  'decode_credentials','driver_args',
                                  'adapter_args', 'attempts',
                                  'bigint_id','debug','lazy_tables',
                                  'do_connect']))

        for table in self:
            tablename = str(table)
            db_as_dict["tables"].append(tablename)
            db_as_dict["items"][tablename] = table.as_dict(flat=flat,
                                                           sanitize=sanitize,
                                                           field_options=field_options)
        return db_as_dict
7791
7792 - def as_xml(self, sanitize=True, field_options=True):
7793 if not have_serializers: 7794 raise ImportError("No xml serializers available") 7795 d = self.as_dict(flat=True, sanitize=sanitize, 7796 field_options=field_options) 7797 return serializers.xml(d)
7798
7799 - def as_json(self, sanitize=True, field_options=True):
7800 if not have_serializers: 7801 raise ImportError("No json serializers available") 7802 d = self.as_dict(flat=True, sanitize=sanitize, 7803 field_options=field_options) 7804 return serializers.json(d)
7805
7806 - def as_yaml(self, sanitize=True, field_options=True):
7807 if not have_serializers: 7808 raise ImportError("No YAML serializers available") 7809 d = self.as_dict(flat=True, sanitize=sanitize, 7810 field_options=field_options) 7811 return serializers.yaml(d)
7812
7813 - def __contains__(self, tablename):
7814 try: 7815 return tablename in self.tables 7816 except AttributeError: 7817 # The instance has no .tables attribute yet 7818 return False
7819 7820 has_key = __contains__ 7821
7822 - def get(self,key,default=None):
7823 return self.__dict__.get(key,default)
7824
7825 - def __iter__(self):
7826 for tablename in self.tables: 7827 yield self[tablename]
7828
7829 - def __getitem__(self, key):
7830 return self.__getattr__(str(key))
7831
    def __getattr__(self, key):
        """
        Attribute access hook that materializes lazy tables on first use.

        If lazy tables are enabled and ``key`` names a pending definition,
        pop it and build the real Table now; otherwise fall through to
        plain object attribute lookup.
        """
        # use ogetattr (object.__getattribute__) to avoid re-entering this hook
        if ogetattr(self, '_lazy_tables') and \
                key in ogetattr(self, '_LAZY_TABLES'):
            tablename, fields, args = self._LAZY_TABLES.pop(key)
            return self.lazy_define_table(tablename, *fields, **args)
        return ogetattr(self, key)
7838
7839 - def __setitem__(self, key, value):
7840 osetattr(self, str(key), value)
7841
7842 - def __setattr__(self, key, value):
7843 if key[:1]!='_' and key in self: 7844 raise SyntaxError( 7845 'Object %s exists and cannot be redefined' % key) 7846 osetattr(self,key,value)
7847 7848 __delitem__ = object.__delattr__ 7849
7850 - def __repr__(self):
7851 if hasattr(self,'_uri'): 7852 return '<DAL uri="%s">' % hide_password(str(self._uri)) 7853 else: 7854 return '<DAL db_uid="%s">' % self._db_uid
7855
7856 - def smart_query(self,fields,text):
7857 return Set(self, smart_query(fields,text))
7858
    def __call__(self, query=None, ignore_common_filters=None):
        """
        Build a Set from ``query``: ``db(db.table.field == x)``.

        A Table argument is converted to its id-query; a Field argument
        selects rows where the field is not NULL; a dict may carry an
        "ignore_common_filters" flag.
        """
        if isinstance(query, Table):
            query = self._adapter.id_query(query)
        elif isinstance(query, Field):
            # `!= None` builds a Query (IS NOT NULL), not a boolean
            query = query != None
        elif isinstance(query, dict):
            icf = query.get("ignore_common_filters")
            if icf: ignore_common_filters = icf
        return Set(self, query, ignore_common_filters=ignore_common_filters)
7868
7869 - def commit(self):
7870 self._adapter.commit()
7871
7872 - def rollback(self):
7873 self._adapter.rollback()
7874
    def close(self):
        """
        Close the adapter connection and deregister this instance from the
        per-thread registry of open DAL instances.
        """
        self._adapter.close()
        if self._db_uid in THREAD_LOCAL.db_instances:
            db_group = THREAD_LOCAL.db_instances[self._db_uid]
            db_group.remove(self)
            # drop the registry entry once the last instance is gone
            if not db_group:
                del THREAD_LOCAL.db_instances[self._db_uid]
7882
7883 - def executesql(self, query, placeholders=None, as_dict=False, 7884 fields=None, colnames=None):
7885 """ 7886 placeholders is optional and will always be None. 7887 If using raw SQL with placeholders, placeholders may be 7888 a sequence of values to be substituted in 7889 or, (if supported by the DB driver), a dictionary with keys 7890 matching named placeholders in your SQL. 7891 7892 Added 2009-12-05 "as_dict" optional argument. Will always be 7893 None when using DAL. If using raw SQL can be set to True 7894 and the results cursor returned by the DB driver will be 7895 converted to a sequence of dictionaries keyed with the db 7896 field names. Tested with SQLite but should work with any database 7897 since the cursor.description used to get field names is part of the 7898 Python dbi 2.0 specs. Results returned with as_dict=True are 7899 the same as those returned when applying .to_list() to a DAL query. 7900 7901 [{field1: value1, field2: value2}, {field1: value1b, field2: value2b}] 7902 7903 Added 2012-08-24 "fields" and "colnames" optional arguments. If either 7904 is provided, the results cursor returned by the DB driver will be 7905 converted to a DAL Rows object using the db._adapter.parse() method. 7906 7907 The "fields" argument is a list of DAL Field objects that match the 7908 fields returned from the DB. The Field objects should be part of one or 7909 more Table objects defined on the DAL object. The "fields" list can 7910 include one or more DAL Table objects in addition to or instead of 7911 including Field objects, or it can be just a single table (not in a 7912 list). In that case, the Field objects will be extracted from the 7913 table(s). 7914 7915 Instead of specifying the "fields" argument, the "colnames" argument 7916 can be specified as a list of field names in tablename.fieldname format. 7917 Again, these should represent tables and fields defined on the DAL 7918 object. 7919 7920 It is also possible to specify both "fields" and the associated 7921 "colnames". 
In that case, "fields" can also include DAL Expression 7922 objects in addition to Field objects. For Field objects in "fields", 7923 the associated "colnames" must still be in tablename.fieldname format. 7924 For Expression objects in "fields", the associated "colnames" can 7925 be any arbitrary labels. 7926 7927 Note, the DAL Table objects referred to by "fields" or "colnames" can 7928 be dummy tables and do not have to represent any real tables in the 7929 database. Also, note that the "fields" and "colnames" must be in the 7930 same order as the fields in the results cursor returned from the DB. 7931 """ 7932 adapter = self._adapter 7933 if placeholders: 7934 adapter.execute(query, placeholders) 7935 else: 7936 adapter.execute(query) 7937 if as_dict: 7938 if not hasattr(adapter.cursor,'description'): 7939 raise RuntimeError("database does not support executesql(...,as_dict=True)") 7940 # Non-DAL legacy db query, converts cursor results to dict. 7941 # sequence of 7-item sequences. each sequence tells about a column. 7942 # first item is always the field name according to Python Database API specs 7943 columns = adapter.cursor.description 7944 # reduce the column info down to just the field names 7945 fields = [f[0] for f in columns] 7946 # will hold our finished resultset in a list 7947 data = adapter._fetchall() 7948 # convert the list for each row into a dictionary so it's 7949 # easier to work with. 
row['field_name'] rather than row[0] 7950 return [dict(zip(fields,row)) for row in data] 7951 try: 7952 data = adapter._fetchall() 7953 except: 7954 return None 7955 if fields or colnames: 7956 fields = [] if fields is None else fields 7957 if not isinstance(fields, list): 7958 fields = [fields] 7959 extracted_fields = [] 7960 for field in fields: 7961 if isinstance(field, Table): 7962 extracted_fields.extend([f for f in field]) 7963 else: 7964 extracted_fields.append(field) 7965 if not colnames: 7966 colnames = ['%s.%s' % (f.tablename, f.name) 7967 for f in extracted_fields] 7968 data = adapter.parse( 7969 data, fields=extracted_fields, colnames=colnames) 7970 return data
7971
7972 - def _remove_references_to(self, thistable):
7973 for table in self: 7974 table._referenced_by = [field for field in table._referenced_by 7975 if not field.table==thistable]
7976
7977 - def export_to_csv_file(self, ofile, *args, **kwargs):
7978 step = int(kwargs.get('max_fetch_rows,',500)) 7979 write_colnames = kwargs['write_colnames'] = \ 7980 kwargs.get("write_colnames", True) 7981 for table in self.tables: 7982 ofile.write('TABLE %s\r\n' % table) 7983 query = self._adapter.id_query(self[table]) 7984 nrows = self(query).count() 7985 kwargs['write_colnames'] = write_colnames 7986 for k in range(0,nrows,step): 7987 self(query).select(limitby=(k,k+step)).export_to_csv_file( 7988 ofile, *args, **kwargs) 7989 kwargs['write_colnames'] = False 7990 ofile.write('\r\n\r\n') 7991 ofile.write('END')
7992
    def import_from_csv_file(self, ifile, id_map=None, null='<NULL>',
                             unique='uuid', *args, **kwargs):
        """
        Restore a database from a file written by export_to_csv_file.

        The file is a sequence of "TABLE <name>" headers each followed by
        CSV data, terminated by "END". Each table's rows are delegated to
        Table.import_from_csv_file; ``id_map`` (if given) accumulates the
        old-id -> new-id mapping across tables.
        """
        #if id_map is None: id_map={}
        id_offset = {} # only used if id_map is None
        for line in ifile:
            line = line.strip()
            if not line:
                continue
            elif line == 'END':
                return
            elif not line.startswith('TABLE ') or not line[6:] in self.tables:
                raise SyntaxError('invalid file format')
            else:
                tablename = line[6:]
                # the Table reads its own CSV rows from the same stream
                self[tablename].import_from_csv_file(
                    ifile, id_map, null, unique, id_offset, *args, **kwargs)
8009
def DAL_unpickler(db_uid):
    """Recreate a '<zombie>' DAL bound to ``db_uid`` during unpickling."""
    return DAL('<zombie>', db_uid=db_uid)
8012
def DAL_pickler(db):
    """Reduce a DAL instance to (DAL_unpickler, (db_uid,)) for pickling."""
    return (DAL_unpickler, (db._db_uid,))

copyreg.pickle(DAL, DAL_pickler, DAL_unpickler)
class SQLALL(object):
    """
    Helper class providing a comma-separated string having all the field names
    (prefixed by table name and '.')

    normally only called from within gluon.sql
    """

    def __init__(self, table):
        # the table whose fields will be expanded by __str__
        self._table = table

    def __str__(self):
        return ', '.join(str(field) for field in self._table)
8031
# class Reference(int):
class Reference(long):
    """
    An integer subclass holding a record id that lazily fetches the
    referenced record on first attribute/item access.
    """

    def __allocate(self):
        # fetch the referenced record once; cached in self._record
        if not self._record:
            self._record = self._table[int(self)]
        if not self._record:
            raise RuntimeError(
                "Using a recursive select but encountered a broken reference: %s %d"%(self._table, int(self)))

    def __getattr__(self, key, default=None):
        # FIX: accepts an optional ``default`` (backward compatible: plain
        # attribute access still passes only ``key`` and gets None).
        # Previously get() called __getattr__(key, default) while this
        # signature was (self, key), so every Reference.get() call raised
        # TypeError.
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, default)

    def get(self, key, default=None):
        """dict-style access to the referenced record with a default."""
        return self.__getattr__(key, default)

    def __setattr__(self, key, value):
        # private attributes bypass the lazy fetch
        if key.startswith('_'):
            int.__setattr__(self, key, value)
            return
        self.__allocate()
        self._record[key] = value

    def __getitem__(self, key):
        if key == 'id':
            return int(self)
        self.__allocate()
        return self._record.get(key, None)

    def __setitem__(self, key, value):
        self.__allocate()
        self._record[key] = value
def Reference_unpickler(data):
    """Rebuild the plain integer id from its marshalled bytes
    (counterpart of Reference_pickler)."""
    value = marshal.loads(data)
    return value
8071
def Reference_pickler(data):
    """Pickle a Reference as its plain (marshalled) integer id, discarding
    any lazily fetched record."""
    try:
        dumped = marshal.dumps(int(data))
    except AttributeError:
        # fall back to a hand-built marshal int record
        dumped = 'i%s' % struct.pack('<i', int(data))
    return (Reference_unpickler, (dumped,))

copyreg.pickle(Reference, Reference_pickler, Reference_unpickler)
class Table(object):

    """
    an instance of this class represents a database table

    Example::

        db = DAL(...)
        db.define_table('users', Field('name'))
        db.users.insert(name='me') # print db.users._insert(...) to see SQL
        db.users.drop()
    """

    def __init__(
        self,
        db,
        tablename,
        *fields,
        **args
        ):
        """
        Initializes the table and performs checking on the provided fields.

        Each table will have automatically an 'id'.

        If a field is of type Table, the fields (excluding 'id') from that table
        will be used instead.

        :raises SyntaxError: when a supplied field is of incorrect type.
        """
        self._actual = False # set to True by define_table()
        self._tablename = tablename
        self._sequence_name = args.get('sequence_name', None) or \
            db and db._adapter.sequence_name(tablename)
        self._trigger_name = args.get('trigger_name', None) or \
            db and db._adapter.trigger_name(tablename)
        self._common_filter = args.get('common_filter', None)
        self._format = args.get('format', None)
        self._singular = args.get(
            'singular', tablename.replace('_', ' ').capitalize())
        self._plural = args.get(
            'plural', pluralize(self._singular.lower()).capitalize())
        # horrible but kept for backward compatibility of appadmin:
        # only set _primarykey when a truthy 'primarykey' arg was passed
        if 'primarykey' in args and args['primarykey']:
            self._primarykey = args.get('primarykey', None)

        # insert/update/delete callback hooks
        self._before_insert = []
        self._before_update = [Set.delete_uploaded_files]
        self._before_delete = [Set.delete_uploaded_files]
        self._after_insert = []
        self._after_update = []
        self._after_delete = []

        fieldnames, newfields = set(), []
        if hasattr(self, '_primarykey'):
            if not isinstance(self._primarykey, list):
                raise SyntaxError(
                    "primarykey must be a list of fields from table '%s'" \
                    % tablename)
            if len(self._primarykey) == 1:
                self._id = [f for f in fields if isinstance(f, Field) \
                                and f.name == self._primarykey[0]][0]
        elif not [f for f in fields if isinstance(f, Field) and f.type == 'id']:
            # no explicit id field: create the implicit one
            field = Field('id', 'id')
            newfields.append(field)
            fieldnames.add('id')
            self._id = field
        virtual_fields = []
        for field in fields:
            if isinstance(field, (FieldMethod, FieldVirtual)):
                # virtual fields are attached at the end, after real fields
                virtual_fields.append(field)
            elif isinstance(field, Field) and not field.name in fieldnames:
                if field.db is not None:
                    # field already bound to another table: work on a copy
                    field = copy.copy(field)
                newfields.append(field)
                fieldnames.add(field.name)
                if field.type == 'id':
                    self._id = field
            elif isinstance(field, Table):
                # a Table argument contributes its fields (except 'id')
                table = field
                for field in table:
                    if not field.name in fieldnames and not field.type == 'id':
                        t2 = not table._actual and self._tablename
                        field = field.clone(point_self_references_to=t2)
                        newfields.append(field)
                        fieldnames.add(field.name)
            elif not isinstance(field, (Field, Table)):
                raise SyntaxError(
                    'define_table argument is not a Field or Table: %s' % field)
        fields = newfields
        self._db = db
        tablename = tablename
        self._fields = SQLCallableList()
        self.virtualfields = []
        fields = list(fields)

        if db and db._adapter.uploads_in_blob == True:
            # backends storing uploads in blobs get a companion blob field
            uploadfields = [f.name for f in fields if f.type == 'blob']
            for field in fields:
                fn = field.uploadfield
                if isinstance(field, Field) and field.type == 'upload'\
                        and fn is True:
                    fn = field.uploadfield = '%s_blob' % field.name
                if isinstance(fn, str) and not fn in uploadfields:
                    fields.append(Field(fn, 'blob', default='',
                                        writable=False, readable=False))

        lower_fieldnames = set()
        reserved = dir(Table) + ['fields']
        for field in fields:
            field_name = field.name
            if db and db.check_reserved:
                db.check_reserved_keyword(field_name)
            elif field_name in reserved:
                raise SyntaxError("field name %s not allowed" % field_name)

            # field names must be unique case-insensitively
            if field_name.lower() in lower_fieldnames:
                raise SyntaxError("duplicate field %s in table %s" \
                                      % (field_name, tablename))
            else:
                lower_fieldnames.add(field_name.lower())

            self.fields.append(field_name)
            self[field_name] = field
            if field.type == 'id':
                self['id'] = field
            # bind the field to this table/db
            field.tablename = field._tablename = tablename
            field.table = field._table = self
            field.db = field._db = db
            if db and not field.type in ('text', 'blob', 'json') and \
                    db._adapter.maxcharlength < field.length:
                # clamp field length to what the backend supports
                field.length = db._adapter.maxcharlength
        self.ALL = SQLALL(self)

        if hasattr(self, '_primarykey'):
            for k in self._primarykey:
                if k not in self.fields:
                    raise SyntaxError(
                        "primarykey must be a list of fields from table '%s " % tablename)
                else:
                    self[k].notnull = True
        for field in virtual_fields:
            self[field.name] = field
8225 8226 @property
8227 - def fields(self):
8228 return self._fields
8229
8230 - def update(self,*args,**kwargs):
8231 raise RuntimeError("Syntax Not Supported")
8232
    def _enable_record_versioning(self,
                                  archive_db=None,
                                  archive_name = '%(tablename)s_archive',
                                  current_record = 'current_record',
                                  is_active = 'is_active'):
        """
        Enable record versioning for this table.

        Defines an archive table (in ``archive_db`` or this table's db)
        cloning this table's fields plus a ``current_record`` reference,
        and installs hooks that archive each row before update. When the
        table has an ``is_active`` field, deletes become soft-deletes and a
        common filter hides inactive rows.
        """
        archive_db = archive_db or self._db
        archive_name = archive_name % dict(tablename=self._tablename)
        if archive_name in archive_db.tables():
            return # do not try define the archive if already exists
        fieldnames = self.fields()
        # cross-database archives cannot use a real reference type
        field_type = self if archive_db is self._db else 'bigint'
        archive_db.define_table(
            archive_name,
            Field(current_record, field_type),
            *[field.clone(unique=False) for field in self])
        # archive the old row content before every update
        self._before_update.append(
            lambda qset, fs, db=archive_db, an=archive_name, cn=current_record:
                archive_record(qset, fs, db[an], cn))
        if is_active and is_active in fieldnames:
            # soft delete: mark rows inactive instead of removing them
            self._before_delete.append(
                lambda qset: qset.update(is_active=False))
            newquery = lambda query, t=self: t.is_active == True
            query = self._common_filter
            if query:
                # combine with any pre-existing common filter
                newquery = query & newquery
            self._common_filter = newquery
8259
8260 - def _validate(self,**vars):
8261 errors = Row() 8262 for key,value in vars.iteritems(): 8263 value,error = self[key].validate(value) 8264 if error: 8265 errors[key] = error 8266 return errors
8267
    def _create_references(self):
        """
        Resolve this table's 'reference ...' fields.

        For each reference field: if the target table already exists,
        register this field in the target's ``_referenced_by``; otherwise
        queue it in db._pending_references until the target is defined.
        Finally adopt any references queued earlier against this table.
        """
        db = self._db
        pr = db._pending_references
        self._referenced_by = []
        for field in self:
            fieldname = field.name
            field_type = field.type
            if isinstance(field_type, str) and field_type[:10] == 'reference ':
                ref = field_type[10:].strip()
                if not ref.split():
                    raise SyntaxError('Table: reference to nothing: %s' %ref)
                # "tablename" or "tablename.fieldname" (keyed tables)
                refs = ref.split('.')
                rtablename = refs[0]
                if not rtablename in db:
                    # target not defined yet: park the field until it is
                    pr[rtablename] = pr.get(rtablename, []) + [field]
                    continue
                rtable = db[rtablename]
                if len(refs) == 2:
                    rfieldname = refs[1]
                    if not hasattr(rtable, '_primarykey'):
                        raise SyntaxError(
                            'keyed tables can only reference other keyed tables (for now)')
                    if rfieldname not in rtable.fields:
                        raise SyntaxError(
                            "invalid field '%s' for referenced table '%s' in table '%s'" \
                            % (rfieldname, rtablename, self._tablename))
                rtable._referenced_by.append(field)
        # adopt references that were queued before this table existed
        for referee in pr.get(self._tablename, []):
            self._referenced_by.append(referee)
8297
8298 - def _filter_fields(self, record, id=False):
8299 return dict([(k, v) for (k, v) in record.iteritems() if k 8300 in self.fields and (self[k].type!='id' or id)])
8301
8302 - def _build_query(self,key):
8303 """ for keyed table only """ 8304 query = None 8305 for k,v in key.iteritems(): 8306 if k in self._primarykey: 8307 if query: 8308 query = query & (self[k] == v) 8309 else: 8310 query = (self[k] == v) 8311 else: 8312 raise SyntaxError( 8313 'Field %s is not part of the primary key of %s' % \ 8314 (k,self._tablename)) 8315 return query
8316
8317 - def __getitem__(self, key):
8318 if not key: 8319 return None 8320 elif isinstance(key, dict): 8321 """ for keyed table """ 8322 query = self._build_query(key) 8323 rows = self._db(query).select() 8324 if rows: 8325 return rows[0] 8326 return None 8327 elif str(key).isdigit() or 'google' in DRIVERS and isinstance(key, Key): 8328 return self._db(self._id == key).select(limitby=(0,1)).first() 8329 elif key: 8330 return ogetattr(self, str(key))
8331
8332 - def __call__(self, key=DEFAULT, **kwargs):
8333 for_update = kwargs.get('_for_update',False) 8334 if '_for_update' in kwargs: del kwargs['_for_update'] 8335 8336 orderby = kwargs.get('_orderby',None) 8337 if '_orderby' in kwargs: del kwargs['_orderby'] 8338 8339 if not key is DEFAULT: 8340 if isinstance(key, Query): 8341 record = self._db(key).select( 8342 limitby=(0,1),for_update=for_update, orderby=orderby).first() 8343 elif not str(key).isdigit(): 8344 record = None 8345 else: 8346 record = self._db(self._id == key).select( 8347 limitby=(0,1),for_update=for_update, orderby=orderby).first() 8348 if record: 8349 for k,v in kwargs.iteritems(): 8350 if record[k]!=v: return None 8351 return record 8352 elif kwargs: 8353 query = reduce(lambda a,b:a&b,[self[k]==v for k,v in kwargs.iteritems()]) 8354 return self._db(query).select(limitby=(0,1),for_update=for_update, orderby=orderby).first() 8355 else: 8356 return None
8357
8358 - def __setitem__(self, key, value):
8359 if isinstance(key, dict) and isinstance(value, dict): 8360 """ option for keyed table """ 8361 if set(key.keys()) == set(self._primarykey): 8362 value = self._filter_fields(value) 8363 kv = {} 8364 kv.update(value) 8365 kv.update(key) 8366 if not self.insert(**kv): 8367 query = self._build_query(key) 8368 self._db(query).update(**self._filter_fields(value)) 8369 else: 8370 raise SyntaxError( 8371 'key must have all fields from primary key: %s'%\ 8372 (self._primarykey)) 8373 elif str(key).isdigit(): 8374 if key == 0: 8375 self.insert(**self._filter_fields(value)) 8376 elif self._db(self._id == key)\ 8377 .update(**self._filter_fields(value)) is None: 8378 raise SyntaxError('No such record: %s' % key) 8379 else: 8380 if isinstance(key, dict): 8381 raise SyntaxError( 8382 'value must be a dictionary: %s' % value) 8383 osetattr(self, str(key), value)
8384 8385 __getattr__ = __getitem__ 8386
8387 - def __setattr__(self, key, value):
8388 if key[:1]!='_' and key in self: 8389 raise SyntaxError('Object exists and cannot be redefined: %s' % key) 8390 osetattr(self,key,value)
8391
8392 - def __delitem__(self, key):
8393 if isinstance(key, dict): 8394 query = self._build_query(key) 8395 if not self._db(query).delete(): 8396 raise SyntaxError('No such record: %s' % key) 8397 elif not str(key).isdigit() or \ 8398 not self._db(self._id == key).delete(): 8399 raise SyntaxError('No such record: %s' % key)
8400
8401 - def __contains__(self,key):
8402 return hasattr(self,key)
8403 8404 has_key = __contains__ 8405
8406 - def items(self):
8407 return self.__dict__.items()
8408
8409 - def __iter__(self):
8410 for fieldname in self.fields: 8411 yield self[fieldname]
8412
8413 - def iteritems(self):
8414 return self.__dict__.iteritems()
8415 8416
8417 - def __repr__(self):
8418 return '<Table %s (%s)>' % (self._tablename,','.join(self.fields()))
8419
8420 - def __str__(self):
8421 if hasattr(self,'_ot') and self._ot is not None: 8422 if 'Oracle' in str(type(self._db._adapter)): # <<< patch 8423 return '%s %s' % (self._ot, self._tablename) # <<< patch 8424 return '%s AS %s' % (self._ot, self._tablename) 8425 return self._tablename
8426
8427 - def _drop(self, mode = ''):
8428 return self._db._adapter._drop(self, mode)
8429
8430 - def drop(self, mode = ''):
8431 return self._db._adapter.drop(self,mode)
8432
8433 - def _listify(self,fields,update=False):
8434 new_fields = {} # format: new_fields[name] = (field,value) 8435 8436 # store all fields passed as input in new_fields 8437 for name in fields: 8438 if not name in self.fields: 8439 if name != 'id': 8440 raise SyntaxError( 8441 'Field %s does not belong to the table' % name) 8442 else: 8443 field = self[name] 8444 value = fields[name] 8445 if field.filter_in: 8446 value = field.filter_in(value) 8447 new_fields[name] = (field,value) 8448 8449 # check all fields that should be in the table but are not passed 8450 to_compute = [] 8451 for ofield in self: 8452 name = ofield.name 8453 if not name in new_fields: 8454 # if field is supposed to be computed, compute it! 8455 if ofield.compute: # save those to compute for later 8456 to_compute.append((name,ofield)) 8457 # if field is required, check its default value 8458 elif not update and not ofield.default is None: 8459 value = ofield.default 8460 fields[name] = value 8461 new_fields[name] = (ofield,value) 8462 # if this is an update, user the update field instead 8463 elif update and not ofield.update is None: 8464 value = ofield.update 8465 fields[name] = value 8466 new_fields[name] = (ofield,value) 8467 # if the field is still not there but it should, error 8468 elif not update and ofield.required: 8469 raise RuntimeError( 8470 'Table: missing required field: %s' % name) 8471 # now deal with fields that are supposed to be computed 8472 if to_compute: 8473 row = Row(fields) 8474 for name,ofield in to_compute: 8475 # try compute it 8476 try: 8477 new_fields[name] = (ofield,ofield.compute(row)) 8478 except (KeyError, AttributeError): 8479 # error sinlently unless field is required! 8480 if ofield.required: 8481 raise SyntaxError('unable to comput field: %s' % name) 8482 return new_fields.values()
8483
8484 - def _attempt_upload(self, fields):
8485 for field in self: 8486 if field.type=='upload' and field.name in fields: 8487 value = fields[field.name] 8488 if value and not isinstance(value,str): 8489 if hasattr(value,'file') and hasattr(value,'filename'): 8490 new_name = field.store(value.file,filename=value.filename) 8491 elif hasattr(value,'read') and hasattr(value,'name'): 8492 new_name = field.store(value,filename=value.name) 8493 else: 8494 raise RuntimeError("Unable to handle upload") 8495 fields[field.name] = new_name
8496
8497 - def _defaults(self, fields):
8498 "If there are no fields/values specified, return table defaults" 8499 if not fields: 8500 fields = {} 8501 for field in self: 8502 if field.type != "id": 8503 fields[field.name] = field.default 8504 return fields
8505
8506 - def _insert(self, **fields):
8507 fields = self._defaults(fields) 8508 return self._db._adapter._insert(self, self._listify(fields))
8509
8510 - def insert(self, **fields):
8511 fields = self._defaults(fields) 8512 self._attempt_upload(fields) 8513 if any(f(fields) for f in self._before_insert): return 0 8514 ret = self._db._adapter.insert(self, self._listify(fields)) 8515 if ret and self._after_insert: 8516 fields = Row(fields) 8517 [f(fields,ret) for f in self._after_insert] 8518 return ret
8519
8520 - def validate_and_insert(self,**fields):
8521 response = Row() 8522 response.errors = Row() 8523 new_fields = copy.copy(fields) 8524 for key,value in fields.iteritems(): 8525 value,error = self[key].validate(value) 8526 if error: 8527 response.errors[key] = "%s" % error 8528 else: 8529 new_fields[key] = value 8530 if not response.errors: 8531 response.id = self.insert(**new_fields) 8532 else: 8533 response.id = None 8534 return response
8535
8536 - def update_or_insert(self, _key=DEFAULT, **values):
8537 if _key is DEFAULT: 8538 record = self(**values) 8539 elif isinstance(_key,dict): 8540 record = self(**_key) 8541 else: 8542 record = self(_key) 8543 if record: 8544 record.update_record(**values) 8545 newid = None 8546 else: 8547 newid = self.insert(**values) 8548 return newid
8549
8550 - def bulk_insert(self, items):
8551 """ 8552 here items is a list of dictionaries 8553 """ 8554 items = [self._listify(item) for item in items] 8555 if any(f(item) for item in items for f in self._before_insert):return 0 8556 ret = self._db._adapter.bulk_insert(self,items) 8557 ret and [[f(item,ret[k]) for k,item in enumerate(items)] for f in self._after_insert] 8558 return ret
8559
8560 - def _truncate(self, mode = None):
8561 return self._db._adapter._truncate(self, mode)
8562
8563 - def truncate(self, mode = None):
8564 return self._db._adapter.truncate(self, mode)
8565
8566 - def import_from_csv_file( 8567 self, 8568 csvfile, 8569 id_map=None, 8570 null='<NULL>', 8571 unique='uuid', 8572 id_offset=None, # id_offset used only when id_map is None 8573 *args, **kwargs 8574 ):
8575 """ 8576 Import records from csv file. 8577 Column headers must have same names as table fields. 8578 Field 'id' is ignored. 8579 If column names read 'table.file' the 'table.' prefix is ignored. 8580 'unique' argument is a field which must be unique 8581 (typically a uuid field) 8582 'restore' argument is default False; 8583 if set True will remove old values in table first. 8584 'id_map' ff set to None will not map ids. 8585 The import will keep the id numbers in the restored table. 8586 This assumes that there is an field of type id that 8587 is integer and in incrementing order. 8588 Will keep the id numbers in restored table. 8589 """ 8590 8591 delimiter = kwargs.get('delimiter', ',') 8592 quotechar = kwargs.get('quotechar', '"') 8593 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 8594 restore = kwargs.get('restore', False) 8595 if restore: 8596 self._db[self].truncate() 8597 8598 reader = csv.reader(csvfile, delimiter=delimiter, 8599 quotechar=quotechar, quoting=quoting) 8600 colnames = None 8601 if isinstance(id_map, dict): 8602 if not self._tablename in id_map: 8603 id_map[self._tablename] = {} 8604 id_map_self = id_map[self._tablename] 8605 8606 def fix(field, value, id_map, id_offset): 8607 list_reference_s='list:reference' 8608 if value == null: 8609 value = None 8610 elif field.type=='blob': 8611 value = base64.b64decode(value) 8612 elif field.type=='double' or field.type=='float': 8613 if not value.strip(): 8614 value = None 8615 else: 8616 value = float(value) 8617 elif field.type in ('integer','bigint'): 8618 if not value.strip(): 8619 value = None 8620 else: 8621 value = int(value) 8622 elif field.type.startswith('list:string'): 8623 value = bar_decode_string(value) 8624 elif field.type.startswith(list_reference_s): 8625 ref_table = field.type[len(list_reference_s):].strip() 8626 if id_map is not None: 8627 value = [id_map[ref_table][int(v)] \ 8628 for v in bar_decode_string(value)] 8629 else: 8630 value = [v for v in 
bar_decode_string(value)] 8631 elif field.type.startswith('list:'): 8632 value = bar_decode_integer(value) 8633 elif id_map and field.type.startswith('reference'): 8634 try: 8635 value = id_map[field.type[9:].strip()][int(value)] 8636 except KeyError: 8637 pass 8638 elif id_offset and field.type.startswith('reference'): 8639 try: 8640 value = id_offset[field.type[9:].strip()]+int(value) 8641 except KeyError: 8642 pass 8643 return (field.name, value)
8644 8645 def is_id(colname): 8646 if colname in self: 8647 return self[colname].type == 'id' 8648 else: 8649 return False
8650 8651 first = True 8652 unique_idx = None 8653 for line in reader: 8654 if not line: 8655 break 8656 if not colnames: 8657 colnames = [x.split('.',1)[-1] for x in line][:len(line)] 8658 cols, cid = [], None 8659 for i,colname in enumerate(colnames): 8660 if is_id(colname): 8661 cid = i 8662 else: 8663 cols.append(i) 8664 if colname == unique: 8665 unique_idx = i 8666 else: 8667 items = [fix(self[colnames[i]], line[i], id_map, id_offset) \ 8668 for i in cols if colnames[i] in self.fields] 8669 8670 if not id_map and cid is not None and id_offset is not None and not unique_idx: 8671 csv_id = int(line[cid]) 8672 curr_id = self.insert(**dict(items)) 8673 if first: 8674 first = False 8675 # First curr_id is bigger than csv_id, 8676 # then we are not restoring but 8677 # extending db table with csv db table 8678 if curr_id>csv_id: 8679 id_offset[self._tablename] = curr_id-csv_id 8680 else: 8681 id_offset[self._tablename] = 0 8682 # create new id until we get the same as old_id+offset 8683 while curr_id<csv_id+id_offset[self._tablename]: 8684 self._db(self._db[self][colnames[cid]] == curr_id).delete() 8685 curr_id = self.insert(**dict(items)) 8686 # Validation. Check for duplicate of 'unique' &, 8687 # if present, update instead of insert. 8688 elif not unique_idx: 8689 new_id = self.insert(**dict(items)) 8690 else: 8691 unique_value = line[unique_idx] 8692 query = self._db[self][unique] == unique_value 8693 record = self._db(query).select().first() 8694 if record: 8695 record.update_record(**dict(items)) 8696 new_id = record[self._id.name] 8697 else: 8698 new_id = self.insert(**dict(items)) 8699 if id_map and cid is not None: 8700 id_map_self[int(line[cid])] = new_id 8701
8702 - def as_dict(self, flat=False, sanitize=True, field_options=True):
8703 tablename = str(self) 8704 table_as_dict = dict(name=tablename, items={}, fields=[], 8705 sequence_name=self._sequence_name, 8706 trigger_name=self._trigger_name, 8707 common_filter=self._common_filter, format=self._format, 8708 singular=self._singular, plural=self._plural) 8709 8710 for field in self: 8711 if (field.readable or field.writable) or (not sanitize): 8712 table_as_dict["fields"].append(field.name) 8713 table_as_dict["items"][field.name] = \ 8714 field.as_dict(flat=flat, sanitize=sanitize, 8715 options=field_options) 8716 return table_as_dict
8717
8718 - def as_xml(self, sanitize=True, field_options=True):
8719 if not have_serializers: 8720 raise ImportError("No xml serializers available") 8721 d = self.as_dict(flat=True, sanitize=sanitize, 8722 field_options=field_options) 8723 return serializers.xml(d)
8724
8725 - def as_json(self, sanitize=True, field_options=True):
8726 if not have_serializers: 8727 raise ImportError("No json serializers available") 8728 d = self.as_dict(flat=True, sanitize=sanitize, 8729 field_options=field_options) 8730 return serializers.json(d)
8731
8732 - def as_yaml(self, sanitize=True, field_options=True):
8733 if not have_serializers: 8734 raise ImportError("No YAML serializers available") 8735 d = self.as_dict(flat=True, sanitize=sanitize, 8736 field_options=field_options) 8737 return serializers.yaml(d)
8738
8739 - def with_alias(self, alias):
8740 return self._db._adapter.alias(self,alias)
8741
8742 - def on(self, query):
8743 return Expression(self._db,self._db._adapter.ON,self,query)
8744
def archive_record(qset, fs, archive_table, current_record):
    """_before_update hook: copy every row about to be updated into
    archive_table, recording the original row id under 'current_record'.

    Returns False so the pending update proceeds.
    Raises RuntimeError when the query spans more than one table.
    (fix: removed the unused local 'table' present in the original)
    """
    tablenames = qset.db._adapter.tables(qset.query)
    if len(tablenames) != 1:
        raise RuntimeError("cannot update join")
    for row in qset.select():
        fields = archive_table._filter_fields(row)
        fields[current_record] = row.id
        archive_table.insert(**fields)
    return False
8754
class Expression(object):
    """A node in a SQL expression tree.

    'op' is an adapter method applied to operand 'first' (and optionally
    'second'); 'type' is the web2py field type of the result, inherited
    from 'first' when not given explicitly.
    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        type=None,
        **optional_args
        ):

        self.db = db
        self.op = op
        self.first = first
        self.second = second
        self._table = getattr(first, '_table', None)
        ### self._tablename = first._tablename ## CHECK
        if not type and first and hasattr(first, 'type'):
            # inherit the type of the wrapped expression
            self.type = first.type
        else:
            self.type = type
        self.optional_args = optional_args

    # --- aggregates and scalar SQL functions ---

    def sum(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'SUM', self.type)

    def max(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MAX', self.type)

    def min(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'MIN', self.type)

    def len(self):
        db = self.db
        return Expression(db, db._adapter.LENGTH, self, None, 'integer')

    def avg(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'AVG', self.type)

    def abs(self):
        db = self.db
        return Expression(db, db._adapter.AGGREGATE, self, 'ABS', self.type)

    def lower(self):
        db = self.db
        return Expression(db, db._adapter.LOWER, self, None, self.type)

    def upper(self):
        db = self.db
        return Expression(db, db._adapter.UPPER, self, None, self.type)

    def replace(self, a, b):
        db = self.db
        return Expression(db, db._adapter.REPLACE, self, (a, b), self.type)

    # --- datetime part extraction ---

    def year(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'year', 'integer')

    def month(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'month', 'integer')

    def day(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'day', 'integer')

    def hour(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'hour', 'integer')

    def minutes(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'minute', 'integer')

    def coalesce(self, *others):
        db = self.db
        return Expression(db, db._adapter.COALESCE, self, others, self.type)

    def coalesce_zero(self):
        db = self.db
        return Expression(db, db._adapter.COALESCE_ZERO, self, None, self.type)

    def seconds(self):
        db = self.db
        return Expression(db, db._adapter.EXTRACT, self, 'second', 'integer')

    def epoch(self):
        db = self.db
        return Expression(db, db._adapter.EPOCH, self, None, 'integer')

    # --- slicing (SQL SUBSTRING) ---

    def __getslice__(self, start, stop):
        db = self.db
        if start < 0:
            pos0 = '(%s - %d)' % (self.len(), abs(start) - 1)
        else:
            pos0 = start + 1

        if stop < 0:
            length = '(%s - %d - %s)' % (self.len(), abs(stop) - 1, pos0)
        elif stop == sys.maxint:
            length = self.len()
        else:
            length = '(%s - %s)' % (stop + 1, pos0)
        return Expression(db, db._adapter.SUBSTRING,
                          self, (pos0, length), self.type)

    def __getitem__(self, i):
        return self[i:i + 1]

    def __str__(self):
        return self.db._adapter.expand(self, self.type)

    def __or__(self, other):  # for use in sortby
        db = self.db
        return Expression(db, db._adapter.COMMA, self, other, self.type)

    def __invert__(self):
        db = self.db
        # double inversion cancels out; fix: the original tested the
        # nonexistent attribute '_op' (the attribute is 'op'), so this
        # short-circuit never fired
        if hasattr(self, 'op') and self.op == db._adapter.INVERT:
            return self.first
        return Expression(db, db._adapter.INVERT, self, type=self.type)

    # --- arithmetic ---

    def __add__(self, other):
        db = self.db
        return Expression(db, db._adapter.ADD, self, other, self.type)

    def __sub__(self, other):
        db = self.db
        if self.type in ('integer', 'bigint'):
            result_type = 'integer'
        elif self.type in ['date', 'time', 'datetime', 'double', 'float']:
            result_type = 'double'
        elif self.type.startswith('decimal('):
            result_type = self.type
        else:
            raise SyntaxError("subtraction operation not supported for type")
        return Expression(db, db._adapter.SUB, self, other, result_type)

    def __mul__(self, other):
        db = self.db
        return Expression(db, db._adapter.MUL, self, other, self.type)

    def __div__(self, other):
        db = self.db
        return Expression(db, db._adapter.DIV, self, other, self.type)

    def __mod__(self, other):
        db = self.db
        return Expression(db, db._adapter.MOD, self, other, self.type)

    # --- comparisons (build Query objects) ---

    def __eq__(self, value):
        db = self.db
        return Query(db, db._adapter.EQ, self, value)

    def __ne__(self, value):
        db = self.db
        return Query(db, db._adapter.NE, self, value)

    def __lt__(self, value):
        db = self.db
        return Query(db, db._adapter.LT, self, value)

    def __le__(self, value):
        db = self.db
        return Query(db, db._adapter.LE, self, value)

    def __gt__(self, value):
        db = self.db
        return Query(db, db._adapter.GT, self, value)

    def __ge__(self, value):
        db = self.db
        return Query(db, db._adapter.GE, self, value)

    def like(self, value, case_sensitive=False):
        db = self.db
        op = case_sensitive and db._adapter.LIKE or db._adapter.ILIKE
        return Query(db, op, self, value)

    def regexp(self, value):
        db = self.db
        return Query(db, db._adapter.REGEXP, self, value)

    def belongs(self, *value):
        """
        Accepts the following inputs:
           field.belongs(1,2)
           field.belongs((1,2))
           field.belongs(query)

        Does NOT accept:
           field.belongs(1)
        """
        db = self.db
        if len(value) == 1:
            value = value[0]
            if isinstance(value, Query):
                # nested select on the queried table's id
                value = db(value)._select(value.first._table._id)
        return Query(db, db._adapter.BELONGS, self, value)

    def startswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("startswith used with incompatible field type")
        return Query(db, db._adapter.STARTSWITH, self, value)

    def endswith(self, value):
        db = self.db
        if not self.type in ('string', 'text', 'json'):
            raise SyntaxError("endswith used with incompatible field type")
        return Query(db, db._adapter.ENDSWITH, self, value)

    def contains(self, value, all=False, case_sensitive=False):
        """
        The case_sensitive parameters is only useful for PostgreSQL
        For other RDMBs it is ignored and contains is always case in-sensitive
        For MongoDB and GAE contains is always case sensitive
        """
        db = self.db
        if isinstance(value, (list, tuple)):
            subqueries = [self.contains(str(v).strip(),
                                        case_sensitive=case_sensitive)
                          for v in value if str(v).strip()]
            if not subqueries:
                return self.contains('')
            else:
                return reduce(all and AND or OR, subqueries)
        if not self.type in ('string', 'text', 'json') and \
                not self.type.startswith('list:'):
            raise SyntaxError("contains used with incompatible field type")
        return Query(db, db._adapter.CONTAINS, self, value,
                     case_sensitive=case_sensitive)

    def with_alias(self, alias):
        db = self.db
        return Expression(db, db._adapter.AS, self, alias, self.type)

    # GIS expressions

    def st_asgeojson(self, precision=15, options=0, version=1):
        return Expression(self.db, self.db._adapter.ST_ASGEOJSON, self,
                          dict(precision=precision, options=options,
                               version=version), 'string')

    def st_astext(self):
        db = self.db
        return Expression(db, db._adapter.ST_ASTEXT, self, type='string')

    def st_x(self):
        db = self.db
        return Expression(db, db._adapter.ST_X, self, type='string')

    def st_y(self):
        db = self.db
        return Expression(db, db._adapter.ST_Y, self, type='string')

    def st_distance(self, other):
        db = self.db
        return Expression(db, db._adapter.ST_DISTANCE, self, other, 'double')

    def st_simplify(self, value):
        db = self.db
        return Expression(db, db._adapter.ST_SIMPLIFY, self, value, self.type)

    # GIS queries

    def st_contains(self, value):
        db = self.db
        return Query(db, db._adapter.ST_CONTAINS, self, value)

    def st_equals(self, value):
        db = self.db
        return Query(db, db._adapter.ST_EQUALS, self, value)

    def st_intersects(self, value):
        db = self.db
        return Query(db, db._adapter.ST_INTERSECTS, self, value)

    def st_overlaps(self, value):
        db = self.db
        return Query(db, db._adapter.ST_OVERLAPS, self, value)

    def st_touches(self, value):
        db = self.db
        return Query(db, db._adapter.ST_TOUCHES, self, value)

    def st_within(self, value):
        db = self.db
        return Query(db, db._adapter.ST_WITHIN, self, value)
# for use in both Query and sortby


class SQLCustomType(object):
    """
    allows defining of custom SQL types

    Example::

        decimal = SQLCustomType(
            type ='double',
            native ='integer',
            encoder =(lambda x: int(float(x) * 100)),
            decoder = (lambda x: Decimal("0.00") + Decimal(str(float(x)/100)) )
            )

        db.define_table(
            'example',
            Field('value', type=decimal)
            )

    :param type: the web2py type (default = 'string')
    :param native: the backend type
    :param encoder: how to encode the value to store it in the backend
    :param decoder: how to decode the value retrieved from the backend
    :param validator: what validators to use ( default = None, will use the
        default validator for type)
    """

    def __init__(
        self,
        type='string',
        native=None,
        encoder=None,
        decoder=None,
        validator=None,
        _class=None,
        ):
        self.type = type
        self.native = native
        # encoder/decoder default to the identity function
        self.encoder = encoder or (lambda x: x)
        self.decoder = decoder or (lambda x: x)
        self.validator = validator
        self._class = _class or type

    def startswith(self, text=None):
        # NOTE(review): self.type.startswith(self, text) passes 'self' as
        # the prefix, so for plain str types this raises TypeError and
        # returns False — looks like an upstream quirk; preserved as-is.
        try:
            return self.type.startswith(self, text)
        except TypeError:
            return False

    def __getslice__(self, a=0, b=100):
        return None

    def __getitem__(self, i):
        return None

    def __str__(self):
        return self._class
9110
class FieldVirtual(object):
    """A read-only, computed-on-select pseudo-field defined by callable f."""

    def __init__(self, name, f=None, ftype='string', label=None,
                 table_name=None):
        # for backward compatibility: the old signature took only the callable
        (self.name, self.f) = (name, f) if f else ('unkown', name)
        self.type = ftype
        self.label = label or self.name.capitalize().replace('_', ' ')
        self.represent = IDENTITY
        self.formatter = IDENTITY
        self.comment = None
        self.readable = True
        self.writable = False
        self.requires = None
        self.widget = None
        self.tablename = table_name
        self.filter_out = None
9126
class FieldMethod(object):
    """A callable pseudo-field (a method attached to rows of a table)."""

    def __init__(self, name, f=None, handler=None):
        # for backward compatibility: the old signature took only the callable
        (self.name, self.f) = (name, f) if f else ('unkown', name)
        self.handler = handler
9132
9133 -def list_represent(x,r=None):
9134 return ', '.join(str(y) for y in x or [])
9135
9136 -class Field(Expression):
9137 9138 Virtual = FieldVirtual 9139 Method = FieldMethod 9140 Lazy = FieldMethod # for backward compatibility 9141 9142 """ 9143 an instance of this class represents a database field 9144 9145 example:: 9146 9147 a = Field(name, 'string', length=32, default=None, required=False, 9148 requires=IS_NOT_EMPTY(), ondelete='CASCADE', 9149 notnull=False, unique=False, 9150 uploadfield=True, widget=None, label=None, comment=None, 9151 uploadfield=True, # True means store on disk, 9152 # 'a_field_name' means store in this field in db 9153 # False means file content will be discarded. 9154 writable=True, readable=True, update=None, authorize=None, 9155 autodelete=False, represent=None, uploadfolder=None, 9156 uploadseparate=False # upload to separate directories by uuid_keys 9157 # first 2 character and tablename.fieldname 9158 # False - old behavior 9159 # True - put uploaded file in 9160 # <uploaddir>/<tablename>.<fieldname>/uuid_key[:2] 9161 # directory) 9162 uploadfs=None # a pyfilesystem where to store upload 9163 9164 to be used as argument of DAL.define_table 9165 9166 allowed field types: 9167 string, boolean, integer, double, text, blob, 9168 date, time, datetime, upload, password 9169 9170 strings must have a length of Adapter.maxcharlength by default (512 or 255 for mysql) 9171 fields should have a default or they will be required in SQLFORMs 9172 the requires argument is used to validate the field input in SQLFORMs 9173 9174 """ 9175
9176 - def __init__( 9177 self, 9178 fieldname, 9179 type='string', 9180 length=None, 9181 default=DEFAULT, 9182 required=False, 9183 requires=DEFAULT, 9184 ondelete='CASCADE', 9185 notnull=False, 9186 unique=False, 9187 uploadfield=True, 9188 widget=None, 9189 label=None, 9190 comment=None, 9191 writable=True, 9192 readable=True, 9193 update=None, 9194 authorize=None, 9195 autodelete=False, 9196 represent=None, 9197 uploadfolder=None, 9198 uploadseparate=False, 9199 uploadfs=None, 9200 compute=None, 9201 custom_store=None, 9202 custom_retrieve=None, 9203 custom_retrieve_file_properties=None, 9204 custom_delete=None, 9205 filter_in = None, 9206 filter_out = None, 9207 custom_qualifier = None, 9208 map_none = None, 9209 ):
9210 self._db = self.db = None # both for backward compatibility 9211 self.op = None 9212 self.first = None 9213 self.second = None 9214 self.name = fieldname = cleanup(fieldname) 9215 if not isinstance(fieldname,str) or hasattr(Table,fieldname) or \ 9216 fieldname[0] == '_' or REGEX_PYTHON_KEYWORDS.match(fieldname): 9217 raise SyntaxError('Field: invalid field name: %s' % fieldname) 9218 self.type = type if not isinstance(type, (Table,Field)) else 'reference %s' % type 9219 self.length = length if not length is None else DEFAULTLENGTH.get(self.type,512) 9220 self.default = default if default!=DEFAULT else (update or None) 9221 self.required = required # is this field required 9222 self.ondelete = ondelete.upper() # this is for reference fields only 9223 self.notnull = notnull 9224 self.unique = unique 9225 self.uploadfield = uploadfield 9226 self.uploadfolder = uploadfolder 9227 self.uploadseparate = uploadseparate 9228 self.uploadfs = uploadfs 9229 self.widget = widget 9230 self.comment = comment 9231 self.writable = writable 9232 self.readable = readable 9233 self.update = update 9234 self.authorize = authorize 9235 self.autodelete = autodelete 9236 self.represent = list_represent if \ 9237 represent==None and type in ('list:integer','list:string') else represent 9238 self.compute = compute 9239 self.isattachment = True 9240 self.custom_store = custom_store 9241 self.custom_retrieve = custom_retrieve 9242 self.custom_retrieve_file_properties = custom_retrieve_file_properties 9243 self.custom_delete = custom_delete 9244 self.filter_in = filter_in 9245 self.filter_out = filter_out 9246 self.custom_qualifier = custom_qualifier 9247 self.label = label if label!=None else fieldname.replace('_',' ').title() 9248 self.requires = requires if requires!=None else [] 9249 self.map_none = map_none
9250
9251 - def set_attributes(self,*args,**attributes):
9252 self.__dict__.update(*args,**attributes)
9253
9254 - def clone(self,point_self_references_to=False,**args):
9255 field = copy.copy(self) 9256 if point_self_references_to and \ 9257 field.type == 'reference %s'+field._tablename: 9258 field.type = 'reference %s' % point_self_references_to 9259 field.__dict__.update(args) 9260 return field
9261
9262 - def store(self, file, filename=None, path=None):
9263 if self.custom_store: 9264 return self.custom_store(file,filename,path) 9265 if isinstance(file, cgi.FieldStorage): 9266 filename = filename or file.filename 9267 file = file.file 9268 elif not filename: 9269 filename = file.name 9270 filename = os.path.basename(filename.replace('/', os.sep)\ 9271 .replace('\\', os.sep)) 9272 m = REGEX_STORE_PATTERN.search(filename) 9273 extension = m and m.group('e') or 'txt' 9274 uuid_key = web2py_uuid().replace('-', '')[-16:] 9275 encoded_filename = base64.b16encode(filename).lower() 9276 newfilename = '%s.%s.%s.%s' % \ 9277 (self._tablename, self.name, uuid_key, encoded_filename) 9278 newfilename = newfilename[:(self.length - 1 - len(extension))] + '.' + extension 9279 self_uploadfield = self.uploadfield 9280 if isinstance(self_uploadfield,Field): 9281 blob_uploadfield_name = self_uploadfield.uploadfield 9282 keys={self_uploadfield.name: newfilename, 9283 blob_uploadfield_name: file.read()} 9284 self_uploadfield.table.insert(**keys) 9285 elif self_uploadfield == True: 9286 if path: 9287 pass 9288 elif self.uploadfolder: 9289 path = self.uploadfolder 9290 elif self.db._adapter.folder: 9291 path = pjoin(self.db._adapter.folder, '..', 'uploads') 9292 else: 9293 raise RuntimeError( 9294 "you must specify a Field(...,uploadfolder=...)") 9295 if self.uploadseparate: 9296 if self.uploadfs: 9297 raise RuntimeError("not supported") 9298 path = pjoin(path,"%s.%s" %(self._tablename, self.name), 9299 uuid_key[:2]) 9300 if not exists(path): 9301 os.makedirs(path) 9302 pathfilename = pjoin(path, newfilename) 9303 if self.uploadfs: 9304 dest_file = self.uploadfs.open(newfilename, 'wb') 9305 else: 9306 dest_file = open(pathfilename, 'wb') 9307 try: 9308 shutil.copyfileobj(file, dest_file) 9309 except IOError: 9310 raise IOError( 9311 'Unable to store file "%s" because invalid permissions, readonly file system, or filename too long' % pathfilename) 9312 dest_file.close() 9313 return newfilename
9314
    def retrieve(self, name, path=None, nameonly=False):
        """
        Retrieve a previously stored upload by its encoded name.

        if nameonly==True return (filename, fullfilename) instead of
        (filename, stream)

        :param name: the encoded filename produced by store()
        :param path: optional folder override passed to retrieve_file_properties
        :returns: (original_filename, stream) — or (filename, fullpath) when
                  nameonly is True and the file lives on the filesystem
        :raises http.HTTP: 404 when the record is missing, 403 when
                  self.authorize denies access
        :raises TypeError: when name does not match the upload-name pattern
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve:
            return self.custom_retrieve(name, path)
        import http
        # A DB lookup is needed both for authorization and when the content
        # itself is stored in a string-named blob column.
        if self.authorize or isinstance(self_uploadfield, str):
            row = self.db(self == name).select().first()
            if not row:
                raise http.HTTP(404)
            if self.authorize and not self.authorize(row):
                raise http.HTTP(403)
        m = REGEX_UPLOAD_PATTERN.match(name)
        if not m or not self.isattachment:
            raise TypeError('Can\'t retrieve %s' % name)
        file_properties = self.retrieve_file_properties(name,path)
        filename = file_properties['filename']
        if isinstance(self_uploadfield, str): # ## if file is in DB
            stream = StringIO.StringIO(row[self_uploadfield] or '')
        elif isinstance(self_uploadfield,Field):
            # content stored in a blob field of another table
            blob_uploadfield_name = self_uploadfield.uploadfield
            query = self_uploadfield == name
            data = self_uploadfield.table(query)[blob_uploadfield_name]
            stream = StringIO.StringIO(data)
        elif self.uploadfs:
            # ## if file is on pyfilesystem
            stream = self.uploadfs.open(name, 'rb')
        else:
            # ## if file is on regular filesystem
            # this is intentionally a string with the filename and not a stream
            # this propagates and allows stream_file_or_304_or_206 to be called
            fullname = pjoin(file_properties['path'],name)
            if nameonly:
                return (filename, fullname)
            stream = open(fullname,'rb')
        return (filename, stream)
9354
    def retrieve_file_properties(self, name, path=None):
        """
        Decode an upload name into {'path': folder_or_None, 'filename': original}.

        path is None when the content is stored in the database (string or
        Field uploadfield); otherwise it is the folder holding the file.
        Falls back to filename=name when the name cannot be decoded.
        """
        self_uploadfield = self.uploadfield
        if self.custom_retrieve_file_properties:
            return self.custom_retrieve_file_properties(name, path)
        try:
            m = REGEX_UPLOAD_PATTERN.match(name)
            if not m or not self.isattachment:
                raise TypeError('Can\'t retrieve %s file properties' % name)
            # The original filename was base16-encoded by store(); decode it
            # (case-insensitively) and sanitize unexpected characters.
            filename = base64.b16decode(m.group('name'), True)
            filename = REGEX_CLEANUP_FN.sub('_', filename)
        except (TypeError, AttributeError):
            filename = name
        if isinstance(self_uploadfield, str): # ## if file is in DB
            return dict(path=None,filename=filename)
        elif isinstance(self_uploadfield,Field):
            # content stored in a blob field of another table: no path
            return dict(path=None,filename=filename)
        else:
            # ## if file is on filesystem
            if path:
                pass
            elif self.uploadfolder:
                path = self.uploadfolder
            else:
                path = pjoin(self.db._adapter.folder, '..', 'uploads')
            if self.uploadseparate:
                # mirror the sharded layout used by store()
                t = m.group('table')
                f = m.group('field')
                u = m.group('uuidkey')
                path = pjoin(path,"%s.%s" % (t,f),u[:2])
            return dict(path=path,filename=filename)
9385 9386
9387 - def formatter(self, value):
9388 requires = self.requires 9389 if value is None or not requires: 9390 return value or self.map_none 9391 if not isinstance(requires, (list, tuple)): 9392 requires = [requires] 9393 elif isinstance(requires, tuple): 9394 requires = list(requires) 9395 else: 9396 requires = copy.copy(requires) 9397 requires.reverse() 9398 for item in requires: 9399 if hasattr(item, 'formatter'): 9400 value = item.formatter(value) 9401 return value
9402
9403 - def validate(self, value):
9404 if not self.requires or self.requires == DEFAULT: 9405 return ((value if value!=self.map_none else None), None) 9406 requires = self.requires 9407 if not isinstance(requires, (list, tuple)): 9408 requires = [requires] 9409 for validator in requires: 9410 (value, error) = validator(value) 9411 if error: 9412 return (value, error) 9413 return ((value if value!=self.map_none else None), None)
9414
9415 - def count(self, distinct=None):
9416 return Expression(self.db, self.db._adapter.COUNT, self, distinct, 'integer')
9417
    def as_dict(self, flat=False, sanitize=True, options=True):
        """
        Serialize this field's definition to a dictionary.

        :param flat: when True, replace non-serializable objects (types,
                     callables, instances) with string representations
        :param sanitize: when True, omit validators whose type name contains
                     CRYPT or IS_STRONG (avoids leaking secrets)
        :param options: when False, strip validator option lists
                     ('labels'/'theset') from the output
        :returns: dict with 'colname', 'requires' and the attributes in attrs
        """
        attrs = ('type', 'length', 'default', 'required',
                 'ondelete', 'notnull', 'unique', 'uploadfield',
                 'widget', 'label', 'comment', 'writable', 'readable',
                 'update', 'authorize', 'autodelete', 'represent',
                 'uploadfolder', 'uploadseparate', 'uploadfs',
                 'compute', 'custom_store', 'custom_retrieve',
                 'custom_retrieve_file_properties', 'custom_delete',
                 'filter_in', 'filter_out', 'custom_qualifier',
                 'map_none', 'name')

        SERIALIZABLE_TYPES = (int, long, basestring, dict, list,
                              float, tuple, bool, type(None))

        def flatten(obj):
            # Recursively convert obj into serializable primitives when
            # flat=True; otherwise only shallow-copy containers.
            if flat:
                if isinstance(obj, flatten.__class__):
                    # functions (like flatten itself) become their type string
                    return str(type(obj))
                elif isinstance(obj, type):
                    try:
                        return str(obj).split("'")[1]
                    except IndexError:
                        return str(obj)
                elif not isinstance(obj, SERIALIZABLE_TYPES):
                    return str(obj)
                elif isinstance(obj, dict):
                    newobj = dict()
                    for k, v in obj.items():
                        newobj[k] = flatten(v)
                    return newobj
                elif isinstance(obj, (list, tuple, set)):
                    return [flatten(v) for v in obj]
                else:
                    return obj
            elif isinstance(obj, (dict, set)):
                return obj.copy()
            else: return obj

        def filter_requires(t, r, options=True):
            # Serialize a single validator r of type t; returns None for
            # sanitized (secret-bearing) validators.
            if sanitize and any([keyword in str(t).upper() for
                                 keyword in ("CRYPT", "IS_STRONG")]):
                return None

            if not isinstance(r, dict):
                if options and hasattr(r, "options"):
                    if callable(r.options):
                        # force lazy option sets to populate r.__dict__
                        r.options()
                newr = r.__dict__.copy()
            else:
                newr = r.copy()

            # remove options if not required
            if not options and newr.has_key("labels"):
                [newr.update({key:None}) for key in
                 ("labels", "theset") if (key in newr)]

            for k, v in newr.items():
                if k == "other":
                    # nested validator (e.g. IS_EMPTY_OR): recurse
                    if isinstance(v, dict):
                        otype, other = v.popitem()
                    else:
                        otype = flatten(type(v))
                        other = v
                    newr[k] = {otype: filter_requires(otype, other,
                                                      options=options)}
                else:
                    newr[k] = flatten(v)
            return newr

        if isinstance(self.requires, (tuple, list, set)):
            requires = dict([(flatten(type(r)),
                              filter_requires(type(r), r,
                                              options=options)) for
                             r in self.requires])
        else:
            requires = {flatten(type(self.requires)):
                        filter_requires(type(self.requires),
                                        self.requires, options=options)}

        d = dict(colname="%s.%s" % (self.tablename, self.name),
                 requires=requires)
        d.update([(attr, flatten(getattr(self, attr))) for attr in attrs])
        return d
9503 - def as_xml(self, sanitize=True, options=True):
9504 if have_serializers: 9505 xml = serializers.xml 9506 else: 9507 raise ImportError("No xml serializers available") 9508 d = self.as_dict(flat=True, sanitize=sanitize, 9509 options=options) 9510 return xml(d)
9511
9512 - def as_json(self, sanitize=True, options=True):
9513 if have_serializers: 9514 json = serializers.json 9515 else: 9516 raise ImportError("No json serializers available") 9517 d = self.as_dict(flat=True, sanitize=sanitize, 9518 options=options) 9519 return json(d)
9520
9521 - def as_yaml(self, sanitize=True, options=True):
9522 if have_serializers: 9523 d = self.as_dict(flat=True, sanitize=sanitize, 9524 options=options) 9525 return serializers.yaml(d) 9526 else: 9527 raise ImportError("No YAML serializers available")
9528
9529 - def __nonzero__(self):
9530 return True
9531
9532 - def __str__(self):
9533 try: 9534 return '%s.%s' % (self.tablename, self.name) 9535 except: 9536 return '<no table>.%s' % self.name
9537
class Query(object):

    """
    a query object necessary to define a set.
    it can be stored or can be passed to DAL.__call__() to obtain a Set

    Example::

        query = db.users.name=='Max'
        set = db(query)
        records = set.select()

    """

    def __init__(
        self,
        db,
        op,
        first=None,
        second=None,
        ignore_common_filters = False,
        **optional_args
        ):
        # db: owning DAL; op: adapter operator method (e.g. db._adapter.EQ)
        # first/second: operands (Field/Expression/Query/constant)
        self.db = self._db = db
        self.op = op
        self.first = first
        self.second = second
        self.ignore_common_filters = ignore_common_filters
        self.optional_args = optional_args

    def __repr__(self):
        # Calls BaseAdapter.expand unbound so repr bypasses any adapter
        # override of expand() -- NOTE(review): presumably intentional;
        # confirm against adapter subclasses.
        return '<Query %s>' % BaseAdapter.expand(self.db._adapter,self)

    def __str__(self):
        """Adapter-specific (SQL) text of this query."""
        return self.db._adapter.expand(self)

    def __and__(self, other):
        """query & query -> AND query"""
        return Query(self.db,self.db._adapter.AND,self,other)

    def __or__(self, other):
        """query | query -> OR query"""
        return Query(self.db,self.db._adapter.OR,self,other)

    def __invert__(self):
        """~query -> NOT query; a double negation unwraps to the inner query."""
        if self.op==self.db._adapter.NOT:
            return self.first
        return Query(self.db,self.db._adapter.NOT,self)

    def __eq__(self, other):
        # structural equality via the expanded representation
        return repr(self) == repr(other)

    def __ne__(self, other):
        return not (self == other)

    def case(self,t=1,f=0):
        """Build CASE WHEN <self> THEN t ELSE f END via the adapter."""
        return self.db._adapter.CASE(self,t,f)

    def as_dict(self, flat=False, sanitize=True):
        """Experimental stuff

        This allows to return a plain dictionary with the basic
        query representation. Can be used with json/xml services
        for client-side db I/O

        Example:
        >>> q = db.auth_user.id != 0
        >>> q.as_dict(flat=True)
        {"op": "NE", "first":{"tablename": "auth_user",
                              "fieldname": "id"},
         "second":0}
        """

        SERIALIZABLE_TYPES = (tuple, dict, list, int, long, float,
                              basestring, type(None), bool)
        def loop(d):
            # recursively serialize a Query/Expression __dict__
            newd = dict()
            for k, v in d.items():
                if k in ("first", "second"):
                    if isinstance(v, self.__class__):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, Field):
                        newd[k] = {"tablename": v._tablename,
                                   "fieldname": v.name}
                    elif isinstance(v, Expression):
                        newd[k] = loop(v.__dict__)
                    elif isinstance(v, SERIALIZABLE_TYPES):
                        newd[k] = v
                    elif isinstance(v, (datetime.date,
                                        datetime.time,
                                        datetime.datetime)):
                        newd[k] = unicode(v)
                elif k == "op":
                    if callable(v):
                        # adapter method -> its name (e.g. "EQ")
                        newd[k] = v.__name__
                    elif isinstance(v, basestring):
                        newd[k] = v
                    else: pass # not callable or string
                elif isinstance(v, SERIALIZABLE_TYPES):
                    if isinstance(v, dict):
                        newd[k] = loop(v)
                    else: newd[k] = v
            return newd

        if flat:
            return loop(self.__dict__)
        else: return self.__dict__


    def as_xml(self, sanitize=True):
        """Serialize this query (via as_dict) to XML."""
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        """Serialize this query (via as_dict) to JSON."""
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)
9661
def xorify(orderby):
    """
    Fold a sequence of orderby expressions into a single expression by
    chaining them with the | operator; returns None for an empty input.
    """
    if not orderby:
        return None
    combined = orderby[0]
    for expression in orderby[1:]:
        combined = combined | expression
    return combined
9669
def use_common_filters(query):
    """
    Decide whether common filters apply to `query`: true only when query
    exists, exposes ignore_common_filters, and that flag is falsy.
    A falsy query is returned unchanged (preserving `and` semantics).
    """
    if not query:
        return query
    if not hasattr(query, 'ignore_common_filters'):
        return False
    return not query.ignore_common_filters
9673
class Set(object):

    """
    a Set represents a set of records in the database,
    the records are identified by the query=Query(...) object.
    normally the Set is generated by DAL.__call__(Query(...))

    given a set, for example
    set = db(db.users.name=='Max')
    you can:
    set.update(db.users.name='Massimo')
    set.delete() # all elements in the set
    set.select(orderby=db.users.id, groupby=db.users.name, limitby=(0,10))
    and take subsets:
    subset = set(db.users.id<5)
    """

    def __init__(self, db, query, ignore_common_filters = None):
        self.db = db
        self._db = db # for backward compatibility
        self.dquery = None

        # if query is a dict, parse it
        if isinstance(query, dict):
            query = self.parse(query)

        # copy the query before toggling the flag so the caller's Query
        # object is not mutated
        if not ignore_common_filters is None and \
                use_common_filters(query) == ignore_common_filters:
            query = copy.copy(query)
            query.ignore_common_filters = ignore_common_filters
        self.query = query

    def __repr__(self):
        # unbound BaseAdapter.expand: bypasses adapter expand() overrides
        return '<Set %s>' % BaseAdapter.expand(self.db._adapter,self.query)

    def __call__(self, query, ignore_common_filters=False):
        """Refine this set with a further query, returning a new Set."""
        if isinstance(query,Table):
            # a bare table means "all records with an id"
            query = self.db._adapter.id_query(query)
        elif isinstance(query,str):
            # raw SQL snippet
            query = Expression(self.db,query)
        elif isinstance(query,Field):
            # a bare field means "field is not NULL"
            query = query!=None
        if self.query:
            return Set(self.db, self.query & query,
                       ignore_common_filters=ignore_common_filters)
        else:
            return Set(self.db, query,
                       ignore_common_filters=ignore_common_filters)

    def _count(self,distinct=None):
        """Return the SQL of the COUNT query without executing it."""
        return self.db._adapter._count(self.query,distinct)

    def _select(self, *fields, **attributes):
        """Return the SQL of the SELECT query without executing it."""
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter._select(self.query,fields,attributes)

    def _delete(self):
        """Return the SQL of the DELETE query without executing it."""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        return db._adapter._delete(tablename,self.query)

    def _update(self, **update_fields):
        """Return the SQL of the UPDATE query without executing it."""
        db = self.db
        tablename = db._adapter.get_table(self.query)
        fields = db[tablename]._listify(update_fields,update=True)
        return db._adapter._update(tablename,self.query,fields)

    def as_dict(self, flat=False, sanitize=True):
        """
        Serialize this set to a dict; with flat=True the query is expanded
        via Query.as_dict and db metadata is included (uri/name/uid only
        when sanitize is False).
        """
        if flat:
            uid = dbname = uri = None
            codec = self.db._db_codec
            if not sanitize:
                # NOTE(review): uri gets db._dbname and dbname gets str(db)
                # -- looks swapped; confirm against consumers before changing.
                uri, dbname, uid = (self.db._dbname, str(self.db),
                                    self.db._db_uid)
            d = {"query": self.query.as_dict(flat=flat)}
            d["db"] = {"uid": uid, "codec": codec,
                       "name": dbname, "uri": uri}
            return d
        else: return self.__dict__

    def as_xml(self, sanitize=True):
        """Serialize this set (via as_dict) to XML."""
        if have_serializers:
            xml = serializers.xml
        else:
            raise ImportError("No xml serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return xml(d)

    def as_json(self, sanitize=True):
        """Serialize this set (via as_dict) to JSON."""
        if have_serializers:
            json = serializers.json
        else:
            raise ImportError("No json serializers available")
        d = self.as_dict(flat=True, sanitize=sanitize)
        return json(d)

    def parse(self, dquery):
        "Experimental: Turn a dictionary into a Query object"
        self.dquery = dquery
        return self.build(self.dquery)

    def build(self, d):
        "Experimental: see .parse()"
        # d is {"op": <name>, "first": ..., "second": ...} as produced by
        # Query.as_dict(flat=True)
        op, first, second = (d["op"], d["first"],
                             d.get("second", None))
        left = right = built = None

        if op in ("AND", "OR"):
            if not (type(first), type(second)) == (dict, dict):
                raise SyntaxError("Invalid AND/OR query")
            if op == "AND":
                built = self.build(first) & self.build(second)
            else: built = self.build(first) | self.build(second)

        elif op == "NOT":
            if first is None:
                raise SyntaxError("Invalid NOT query")
            built = ~self.build(first)
        else:
            # normal operation (GT, EQ, LT, ...)
            for k, v in {"left": first, "right": second}.items():
                if isinstance(v, dict) and v.get("op"):
                    # nested sub-query/expression
                    v = self.build(v)
                if isinstance(v, dict) and ("tablename" in v):
                    # field reference -> resolve to the actual Field
                    v = self.db[v["tablename"]][v["fieldname"]]
                if k == "left": left = v
                else: right = v

            # opm is only defined when the adapter exposes the operator;
            # the branches below that use it are guarded by the same names
            if hasattr(self.db._adapter, op):
                opm = getattr(self.db._adapter, op)

            if op == "EQ": built = left == right
            elif op == "NE": built = left != right
            elif op == "GT": built = left > right
            elif op == "GE": built = left >= right
            elif op == "LT": built = left < right
            elif op == "LE": built = left <= right
            elif op in ("JOIN", "LEFT_JOIN", "RANDOM", "ALLOW_NULL"):
                built = Expression(self.db, opm)
            elif op in ("LOWER", "UPPER", "EPOCH", "PRIMARY_KEY",
                        "COALESCE_ZERO", "RAW", "INVERT"):
                built = Expression(self.db, opm, left)
            elif op in ("COUNT", "EXTRACT", "AGGREGATE", "SUBSTRING",
                        "REGEXP", "LIKE", "ILIKE", "STARTSWITH",
                        "ENDSWITH", "ADD", "SUB", "MUL", "DIV",
                        "MOD", "AS", "ON", "COMMA", "NOT_NULL",
                        "COALESCE", "CONTAINS", "BELONGS"):
                built = Expression(self.db, opm, left, right)
            # expression as string
            elif not (left or right): built = Expression(self.db, op)
            else:
                raise SyntaxError("Operator not supported: %s" % op)

        return built

    def isempty(self):
        """True when the set matches no records (cheap: limitby (0,1))."""
        return not self.select(limitby=(0,1))

    def count(self,distinct=None, cache=None):
        """
        Count matching records; cache is an optional (cache_model,
        time_expire) pair keyed on the generated SQL.
        """
        db = self.db
        if cache:
            cache_model, time_expire = cache
            sql = self._count(distinct=distinct)
            key = db._uri + '/' + sql
            # keep cache keys bounded in length
            if len(key)>200: key = hashlib_md5(key).hexdigest()
            return cache_model(
                key,
                (lambda self=self,distinct=distinct: \
                  db._adapter.count(self.query,distinct)),
                time_expire)
        return db._adapter.count(self.query,distinct)

    def select(self, *fields, **attributes):
        """Execute the SELECT for this set and return a Rows object."""
        adapter = self.db._adapter
        tablenames = adapter.tables(self.query,
                                    attributes.get('join',None),
                                    attributes.get('left',None),
                                    attributes.get('orderby',None),
                                    attributes.get('groupby',None))
        fields = adapter.expand_all(fields, tablenames)
        return adapter.select(self.query,fields,attributes)

    def nested_select(self,*fields,**attributes):
        """Wrap this set's SELECT SQL as an Expression for sub-queries."""
        return Expression(self.db,self._select(*fields,**attributes))

    def delete(self):
        """
        Delete matching records, honoring the table's _before_delete /
        _after_delete callbacks; returns 0 when vetoed by a callback.
        """
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        if any(f(self) for f in table._before_delete): return 0
        ret = db._adapter.delete(tablename,self.query)
        ret and [f(self) for f in table._after_delete]
        return ret

    def update(self, **update_fields):
        """
        Update matching records, honoring _before_update / _after_update
        callbacks; returns 0 when vetoed by a callback.
        :raises SyntaxError: when no updatable fields remain
        """
        db = self.db
        tablename = db._adapter.get_table(self.query)
        table = db[tablename]
        table._attempt_upload(update_fields)
        if any(f(self,update_fields) for f in table._before_update):
            return 0
        fields = table._listify(update_fields,update=True)
        if not fields:
            raise SyntaxError("No fields to update")
        ret = db._adapter.update(tablename,self.query,fields)
        ret and [f(self,update_fields) for f in table._after_update]
        return ret

    def update_naive(self, **update_fields):
        """
        same as update but does not call table._before_update and _after_update
        """
        tablename = self.db._adapter.get_table(self.query)
        table = self.db[tablename]
        fields = table._listify(update_fields,update=True)
        if not fields: raise SyntaxError("No fields to update")
        ret = self.db._adapter.update(tablename,self.query,fields)
        return ret

    def validate_and_update(self, **update_fields):
        """
        Validate each field value, then update only when all pass.
        Returns a Row with .errors (per-field) and .updated (count or None).
        """
        tablename = self.db._adapter.get_table(self.query)
        response = Row()
        response.errors = Row()
        new_fields = copy.copy(update_fields)
        for key,value in update_fields.iteritems():
            value,error = self.db[tablename][key].validate(value)
            if error:
                response.errors[key] = error
            else:
                new_fields[key] = value
        table = self.db[tablename]
        if response.errors:
            response.updated = None
        else:
            if not any(f(self,new_fields) for f in table._before_update):
                fields = table._listify(new_fields,update=True)
                if not fields: raise SyntaxError("No fields to update")
                ret = self.db._adapter.update(tablename,self.query,fields)
                ret and [f(self,new_fields) for f in table._after_update]
            else:
                ret = 0
            response.updated = ret
        return response

    def delete_uploaded_files(self, upload_fields=None):
        """
        Remove stored files referenced by upload fields of matching records
        (autodelete fields only); returns False.
        """
        table = self.db[self.db._adapter.tables(self.query)[0]]
        # ## mind uploadfield==True means file is not in DB
        if upload_fields:
            fields = upload_fields.keys()
        else:
            fields = table.fields
        fields = [f for f in fields if table[f].type == 'upload'
                   and table[f].uploadfield == True
                   and table[f].autodelete]
        if not fields:
            return False
        for record in self.select(*[table[f] for f in fields]):
            for fieldname in fields:
                field = table[fieldname]
                oldname = record.get(fieldname, None)
                if not oldname:
                    continue
                if upload_fields and oldname == upload_fields[fieldname]:
                    # value unchanged: keep the file
                    continue
                if field.custom_delete:
                    field.custom_delete(oldname)
                else:
                    uploadfolder = field.uploadfolder
                    if not uploadfolder:
                        uploadfolder = pjoin(
                            self.db._adapter.folder, '..', 'uploads')
                    if field.uploadseparate:
                        # mirror the sharded layout used by Field.store()
                        items = oldname.split('.')
                        uploadfolder = pjoin(
                            uploadfolder,
                            "%s.%s" % (items[0], items[1]),
                            items[2][:2])
                    oldpath = pjoin(uploadfolder, oldname)
                    if exists(oldpath):
                        os.unlink(oldpath)
        return False
9961
class RecordUpdater(object):
    """
    Callable bound to a single record (row.update_record): updates the
    record in the database, bypassing common filters, and refreshes the
    in-memory column set.
    """
    def __init__(self, colset, table, id):
        # colset: the Row's column storage; id: the record id
        self.colset, self.db, self.tablename, self.id = \
            colset, table._db, table._tablename, id

    def __call__(self, **fields):
        """
        Update the bound record with `fields` (or re-save the current
        colset when no fields are given) and return the refreshed colset.
        """
        colset, db, tablename, id = self.colset, self.db, self.tablename, self.id
        table = db[tablename]
        newfields = fields or dict(colset)
        # Iterate over a snapshot of the keys: entries are deleted while
        # looping (original iterated newfields.keys() directly, which is
        # only safe under Python 2 where keys() returns a list).
        for fieldname in list(newfields):
            if not fieldname in table.fields or table[fieldname].type=='id':
                # drop unknown columns and the immutable id column
                del newfields[fieldname]
        table._db(table._id==id,ignore_common_filters=True).update(**newfields)
        colset.update(newfields)
        return colset
9977
class RecordDeleter(object):
    """Callable bound to one record (row.delete_record): deletes it by id."""

    def __init__(self, table, id):
        self.db = table._db
        self.tablename = table._tablename
        self.id = id

    def __call__(self):
        """Delete the bound record; returns the adapter's delete count."""
        query = self.db[self.tablename]._id == self.id
        return self.db(query).delete()
9983
class LazySet(object):
    """
    Lightweight stand-in for a Set, built lazily from a reference field
    and a record id; every operation materializes the real Set on demand.
    """

    def __init__(self, field, id):
        self.db = field.db
        self.tablename = field._tablename
        self.fieldname = field.name
        self.id = id

    def _getset(self):
        # Build the concrete Set: records whose field equals the bound id.
        referencing_field = self.db[self.tablename][self.fieldname]
        return Set(self.db, referencing_field == self.id)

    def __repr__(self):
        return repr(self._getset())

    def __call__(self, query, ignore_common_filters=False):
        return self._getset()(query, ignore_common_filters)

    def _count(self, distinct=None):
        return self._getset()._count(distinct)

    def _select(self, *fields, **attributes):
        return self._getset()._select(*fields, **attributes)

    def _delete(self):
        return self._getset()._delete()

    def _update(self, **update_fields):
        return self._getset()._update(**update_fields)

    def isempty(self):
        return self._getset().isempty()

    def count(self, distinct=None, cache=None):
        return self._getset().count(distinct, cache)

    def select(self, *fields, **attributes):
        return self._getset().select(*fields, **attributes)

    def nested_select(self, *fields, **attributes):
        return self._getset().nested_select(*fields, **attributes)

    def delete(self):
        return self._getset().delete()

    def update(self, **update_fields):
        return self._getset().update(**update_fields)

    def update_naive(self, **update_fields):
        return self._getset().update_naive(**update_fields)

    def validate_and_update(self, **update_fields):
        return self._getset().validate_and_update(**update_fields)

    def delete_uploaded_files(self, upload_fields=None):
        return self._getset().delete_uploaded_files(upload_fields)
10021
class VirtualCommand(object):
    """Bind a virtual-field method to a row so it can be invoked lazily."""

    def __init__(self, method, row):
        self.method = method
        self.row = row

    def __call__(self, *args, **kwargs):
        """Call the bound method with the row as its first argument."""
        return self.method(self.row, *args, **kwargs)
10028
def lazy_virtualfield(f):
    """
    Decorator marking `f` as a lazy virtual field: Rows.setvirtualfields
    wraps such methods in a VirtualCommand instead of evaluating them eagerly.
    """
    setattr(f, '__lazy__', True)
    return f
10032
10033 -class Rows(object):
10034 10035 """ 10036 A wrapper for the return value of a select. It basically represents a table. 10037 It has an iterator and each row is represented as a dictionary. 10038 """ 10039 10040 # ## TODO: this class still needs some work to care for ID/OID 10041
10042 - def __init__( 10043 self, 10044 db=None, 10045 records=[], 10046 colnames=[], 10047 compact=True, 10048 rawrows=None 10049 ):
10050 self.db = db 10051 self.records = records 10052 self.colnames = colnames 10053 self.compact = compact 10054 self.response = rawrows
10055
10056 - def __repr__(self):
10057 return '<Rows (%s)>' % len(self.records)
10058
10059 - def setvirtualfields(self,**keyed_virtualfields):
10060 """ 10061 db.define_table('x',Field('number','integer')) 10062 if db(db.x).isempty(): [db.x.insert(number=i) for i in range(10)] 10063 10064 from gluon.dal import lazy_virtualfield 10065 10066 class MyVirtualFields(object): 10067 # normal virtual field (backward compatible, discouraged) 10068 def normal_shift(self): return self.x.number+1 10069 # lazy virtual field (because of @staticmethod) 10070 @lazy_virtualfield 10071 def lazy_shift(instance,row,delta=4): return row.x.number+delta 10072 db.x.virtualfields.append(MyVirtualFields()) 10073 10074 for row in db(db.x).select(): 10075 print row.number, row.normal_shift, row.lazy_shift(delta=7) 10076 """ 10077 if not keyed_virtualfields: 10078 return self 10079 for row in self.records: 10080 for (tablename,virtualfields) in keyed_virtualfields.iteritems(): 10081 attributes = dir(virtualfields) 10082 if not tablename in row: 10083 box = row[tablename] = Row() 10084 else: 10085 box = row[tablename] 10086 updated = False 10087 for attribute in attributes: 10088 if attribute[0] != '_': 10089 method = getattr(virtualfields,attribute) 10090 if hasattr(method,'__lazy__'): 10091 box[attribute]=VirtualCommand(method,row) 10092 elif type(method)==types.MethodType: 10093 if not updated: 10094 virtualfields.__dict__.update(row) 10095 updated = True 10096 box[attribute]=method() 10097 return self
10098
10099 - def __and__(self,other):
10100 if self.colnames!=other.colnames: 10101 raise Exception('Cannot & incompatible Rows objects') 10102 records = self.records+other.records 10103 return Rows(self.db,records,self.colnames)
10104
10105 - def __or__(self,other):
10106 if self.colnames!=other.colnames: 10107 raise Exception('Cannot | incompatible Rows objects') 10108 records = self.records 10109 records += [record for record in other.records \ 10110 if not record in records] 10111 return Rows(self.db,records,self.colnames)
10112
10113 - def __nonzero__(self):
10114 if len(self.records): 10115 return 1 10116 return 0
10117
10118 - def __len__(self):
10119 return len(self.records)
10120
10121 - def __getslice__(self, a, b):
10122 return Rows(self.db,self.records[a:b],self.colnames)
10123
10124 - def __getitem__(self, i):
10125 row = self.records[i] 10126 keys = row.keys() 10127 if self.compact and len(keys) == 1 and keys[0] != '_extra': 10128 return row[row.keys()[0]] 10129 return row
10130
10131 - def __iter__(self):
10132 """ 10133 iterator over records 10134 """ 10135 10136 for i in xrange(len(self)): 10137 yield self[i]
10138
10139 - def __str__(self):
10140 """ 10141 serializes the table into a csv file 10142 """ 10143 10144 s = StringIO.StringIO() 10145 self.export_to_csv_file(s) 10146 return s.getvalue()
10147
10148 - def first(self):
10149 if not self.records: 10150 return None 10151 return self[0]
10152
10153 - def last(self):
10154 if not self.records: 10155 return None 10156 return self[-1]
10157
10158 - def find(self,f,limitby=None):
10159 """ 10160 returns a new Rows object, a subset of the original object, 10161 filtered by the function f 10162 """ 10163 if not self: 10164 return Rows(self.db, [], self.colnames) 10165 records = [] 10166 if limitby: 10167 a,b = limitby 10168 else: 10169 a,b = 0,len(self) 10170 k = 0 10171 for row in self: 10172 if f(row): 10173 if a<=k: records.append(row) 10174 k += 1 10175 if k==b: break 10176 return Rows(self.db, records, self.colnames)
10177
10178 - def exclude(self, f):
10179 """ 10180 removes elements from the calling Rows object, filtered by the function f, 10181 and returns a new Rows object containing the removed elements 10182 """ 10183 if not self.records: 10184 return Rows(self.db, [], self.colnames) 10185 removed = [] 10186 i=0 10187 while i<len(self): 10188 row = self[i] 10189 if f(row): 10190 removed.append(self.records[i]) 10191 del self.records[i] 10192 else: 10193 i += 1 10194 return Rows(self.db, removed, self.colnames)
10195
10196 - def sort(self, f, reverse=False):
10197 """ 10198 returns a list of sorted elements (not sorted in place) 10199 """ 10200 rows = Rows(self.db,[],self.colnames,compact=False) 10201 rows.records = sorted(self,key=f,reverse=reverse) 10202 return rows
10203 10204
10205 - def group_by_value(self, field):
10206 """ 10207 regroups the rows, by one of the fields 10208 """ 10209 if not self.records: 10210 return {} 10211 key = str(field) 10212 grouped_row_group = dict() 10213 10214 for row in self: 10215 value = row[key] 10216 if not value in grouped_row_group: 10217 grouped_row_group[value] = [row] 10218 else: 10219 grouped_row_group[value].append(row) 10220 return grouped_row_group
10221
10222 - def as_list(self, 10223 compact=True, 10224 storage_to_dict=True, 10225 datetime_to_str=True, 10226 custom_types=None):
10227 """ 10228 returns the data as a list or dictionary. 10229 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10230 :param datetime_to_str: convert datetime fields as strings (default True) 10231 """ 10232 (oc, self.compact) = (self.compact, compact) 10233 if storage_to_dict: 10234 items = [item.as_dict(datetime_to_str, custom_types) for item in self] 10235 else: 10236 items = [item for item in self] 10237 self.compact = compact 10238 return items
10239 10240
10241 - def as_dict(self, 10242 key='id', 10243 compact=True, 10244 storage_to_dict=True, 10245 datetime_to_str=True, 10246 custom_types=None):
10247 """ 10248 returns the data as a dictionary of dictionaries (storage_to_dict=True) or records (False) 10249 10250 :param key: the name of the field to be used as dict key, normally the id 10251 :param compact: ? (default True) 10252 :param storage_to_dict: when True returns a dict, otherwise a list(default True) 10253 :param datetime_to_str: convert datetime fields as strings (default True) 10254 """ 10255 10256 # test for multiple rows 10257 multi = False 10258 f = self.first() 10259 if f: 10260 multi = any([isinstance(v, f.__class__) for v in f.values()]) 10261 if (not "." in key) and multi: 10262 # No key provided, default to int indices 10263 def new_key(): 10264 i = 0 10265 while True: 10266 yield i 10267 i += 1
10268 key_generator = new_key() 10269 key = lambda r: key_generator.next() 10270 10271 rows = self.as_list(compact, storage_to_dict, datetime_to_str, custom_types) 10272 if isinstance(key,str) and key.count('.')==1: 10273 (table, field) = key.split('.') 10274 return dict([(r[table][field],r) for r in rows]) 10275 elif isinstance(key,str): 10276 return dict([(r[key],r) for r in rows]) 10277 else: 10278 return dict([(key(r),r) for r in rows])
10279
10280 - def export_to_csv_file(self, ofile, null='<NULL>', *args, **kwargs):
10281 """ 10282 export data to csv, the first line contains the column names 10283 10284 :param ofile: where the csv must be exported to 10285 :param null: how null values must be represented (default '<NULL>') 10286 :param delimiter: delimiter to separate values (default ',') 10287 :param quotechar: character to use to quote string values (default '"') 10288 :param quoting: quote system, use csv.QUOTE_*** (default csv.QUOTE_MINIMAL) 10289 :param represent: use the fields .represent value (default False) 10290 :param colnames: list of column names to use (default self.colnames) 10291 This will only work when exporting rows objects!!!! 10292 DO NOT use this with db.export_to_csv() 10293 """ 10294 delimiter = kwargs.get('delimiter', ',') 10295 quotechar = kwargs.get('quotechar', '"') 10296 quoting = kwargs.get('quoting', csv.QUOTE_MINIMAL) 10297 represent = kwargs.get('represent', False) 10298 writer = csv.writer(ofile, delimiter=delimiter, 10299 quotechar=quotechar, quoting=quoting) 10300 colnames = kwargs.get('colnames', self.colnames) 10301 write_colnames = kwargs.get('write_colnames',True) 10302 # a proper csv starting with the column names 10303 if write_colnames: 10304 writer.writerow(colnames) 10305 10306 def none_exception(value): 10307 """ 10308 returns a cleaned up value that can be used for csv export: 10309 - unicode text is encoded as such 10310 - None values are replaced with the given representation (default <NULL>) 10311 """ 10312 if value is None: 10313 return null 10314 elif isinstance(value, unicode): 10315 return value.encode('utf8') 10316 elif isinstance(value,Reference): 10317 return int(value) 10318 elif hasattr(value, 'isoformat'): 10319 return value.isoformat()[:19].replace('T', ' ') 10320 elif isinstance(value, (list,tuple)): # for type='list:..' 10321 return bar_encode(value) 10322 return value
10323 10324 for record in self: 10325 row = [] 10326 for col in colnames: 10327 if not REGEX_TABLE_DOT_FIELD.match(col): 10328 row.append(record._extra[col]) 10329 else: 10330 (t, f) = col.split('.') 10331 field = self.db[t][f] 10332 if isinstance(record.get(t, None), (Row,dict)): 10333 value = record[t][f] 10334 else: 10335 value = record[f] 10336 if field.type=='blob' and not value is None: 10337 value = base64.b64encode(value) 10338 elif represent and field.represent: 10339 value = field.represent(value) 10340 row.append(none_exception(value)) 10341 writer.writerow(row) 10342
def xml(self, strict=False, row_name='row', rows_name='rows'):
    """
    serializes the table using sqlhtml.SQLTABLE (if present)

    :param strict: if True, build plain XML from each row's as_xml()
        instead of delegating to sqlhtml.SQLTABLE
    :param row_name: tag name used for each row element (strict mode)
    :param rows_name: tag name used for the enclosing element (strict mode)
    :returns: an XML string
    """
    if strict:
        # Fix: the original computed `ncols = len(self.colnames)` here and
        # never used it; the dead local has been removed.
        return '<%s>\n%s\n</%s>' % (rows_name,
                                    '\n'.join(row.as_xml(row_name=row_name,
                                                         colnames=self.colnames)
                                              for row in self),
                                    rows_name)

    # imported lazily so non-web2py users of the DAL need not have sqlhtml
    import sqlhtml
    return sqlhtml.SQLTABLE(self).xml()
10357
def as_xml(self, row_name='row', rows_name='rows'):
    """Serialize the rows as plain XML (strict mode of self.xml)."""
    options = dict(strict=True, row_name=row_name, rows_name=rows_name)
    return self.xml(**options)
10360
def as_json(self, mode='object', default=None):
    """
    serializes the table to a JSON list of objects
    """
    # collect each record's JSON-ready representation first
    items = []
    for record in self:
        items.append(record.as_json(mode=mode, default=default,
                                    serialize=False,
                                    colnames=self.colnames))

    # prefer web2py's serializers, fall back to simplejson
    if have_serializers:
        return serializers.json(items,
                                default=default or serializers.custom_json)
    if simplejson:
        return simplejson.dumps(items)
    raise RuntimeError("missing simplejson")
10379 10380 # for consistent naming yet backwards compatible 10381 as_csv = __str__ 10382 json = as_json 10383
################################################################################
# dummy function used to define some doctests
################################################################################

# NOTE: this function exists only to carry the doctests below; the docstring
# text is executed by doctest.testmod() when this module is run directly,
# so its content is behavior — do not edit it casually.
def test_all():
    """

    >>> if len(sys.argv)<2: db = DAL(\"sqlite://test.db\")
    >>> if len(sys.argv)>1: db = DAL(sys.argv[1])
    >>> tmp = db.define_table('users',\
              Field('stringf', 'string', length=32, required=True),\
              Field('booleanf', 'boolean', default=False),\
              Field('passwordf', 'password', notnull=True),\
              Field('uploadf', 'upload'),\
              Field('blobf', 'blob'),\
              Field('integerf', 'integer', unique=True),\
              Field('doublef', 'double', unique=True,notnull=True),\
              Field('jsonf', 'json'),\
              Field('datef', 'date', default=datetime.date.today()),\
              Field('timef', 'time'),\
              Field('datetimef', 'datetime'),\
              migrate='test_user.table')

   Insert a field

    >>> db.users.insert(stringf='a', booleanf=True, passwordf='p', blobf='0A',\
                       uploadf=None, integerf=5, doublef=3.14,\
                       jsonf={"j": True},\
                       datef=datetime.date(2001, 1, 1),\
                       timef=datetime.time(12, 30, 15),\
                       datetimef=datetime.datetime(2002, 2, 2, 12, 30, 15))
    1

    Drop the table

    >>> db.users.drop()

    Examples of insert, select, update, delete

    >>> tmp = db.define_table('person',\
              Field('name'),\
              Field('birth','date'),\
              migrate='test_person.table')
    >>> person_id = db.person.insert(name=\"Marco\",birth='2005-06-22')
    >>> person_id = db.person.insert(name=\"Massimo\",birth='1971-12-21')

    commented len(db().select(db.person.ALL))
    commented 2

    >>> me = db(db.person.id==person_id).select()[0] # test select
    >>> me.name
    'Massimo'
    >>> db.person[2].name
    'Massimo'
    >>> db.person(2).name
    'Massimo'
    >>> db.person(name='Massimo').name
    'Massimo'
    >>> db.person(db.person.name=='Massimo').name
    'Massimo'
    >>> row = db.person[2]
    >>> row.name == row['name'] == row['person.name'] == row('person.name')
    True
    >>> db(db.person.name=='Massimo').update(name='massimo') # test update
    1
    >>> db(db.person.name=='Marco').select().first().delete_record() # test delete
    1

    Update a single record

    >>> me.update_record(name=\"Max\")
    <Row {'name': 'Max', 'birth': datetime.date(1971, 12, 21), 'id': 2}>
    >>> me.name
    'Max'

    Examples of complex search conditions

    >>> len(db((db.person.name=='Max')&(db.person.birth<'2003-01-01')).select())
    1
    >>> len(db((db.person.name=='Max')&(db.person.birth<datetime.date(2003,01,01))).select())
    1
    >>> len(db((db.person.name=='Max')|(db.person.birth<'2003-01-01')).select())
    1
    >>> me = db(db.person.id==person_id).select(db.person.name)[0]
    >>> me.name
    'Max'

    Examples of search conditions using extract from date/datetime/time

    >>> len(db(db.person.birth.month()==12).select())
    1
    >>> len(db(db.person.birth.year()>1900).select())
    1

    Example of usage of NULL

    >>> len(db(db.person.birth==None).select()) ### test NULL
    0
    >>> len(db(db.person.birth!=None).select()) ### test NULL
    1

    Examples of search conditions using lower, upper, and like

    >>> len(db(db.person.name.upper()=='MAX').select())
    1
    >>> len(db(db.person.name.like('%ax')).select())
    1
    >>> len(db(db.person.name.upper().like('%AX')).select())
    1
    >>> len(db(~db.person.name.upper().like('%AX')).select())
    0

    orderby, groupby and limitby

    >>> people = db().select(db.person.name, orderby=db.person.name)
    >>> order = db.person.name|~db.person.birth
    >>> people = db().select(db.person.name, orderby=order)

    >>> people = db().select(db.person.name, orderby=db.person.name, groupby=db.person.name)

    >>> people = db().select(db.person.name, orderby=order, limitby=(0,100))

    Example of one 2 many relation

    >>> tmp = db.define_table('dog',\
              Field('name'),\
              Field('birth','date'),\
              Field('owner',db.person),\
              migrate='test_dog.table')
    >>> db.dog.insert(name='Snoopy', birth=None, owner=person_id)
    1

    A simple JOIN

    >>> len(db(db.dog.owner==db.person.id).select())
    1

    >>> len(db().select(db.person.ALL, db.dog.name,left=db.dog.on(db.dog.owner==db.person.id)))
    1

    Drop tables

    >>> db.dog.drop()
    >>> db.person.drop()

    Example of many 2 many relation and Set

    >>> tmp = db.define_table('author', Field('name'),\
            migrate='test_author.table')
    >>> tmp = db.define_table('paper', Field('title'),\
            migrate='test_paper.table')
    >>> tmp = db.define_table('authorship',\
            Field('author_id', db.author),\
            Field('paper_id', db.paper),\
            migrate='test_authorship.table')
    >>> aid = db.author.insert(name='Massimo')
    >>> pid = db.paper.insert(title='QCD')
    >>> tmp = db.authorship.insert(author_id=aid, paper_id=pid)

    Define a Set

    >>> authored_papers = db((db.author.id==db.authorship.author_id)&(db.paper.id==db.authorship.paper_id))
    >>> rows = authored_papers.select(db.author.name, db.paper.title)
    >>> for row in rows: print row.author.name, row.paper.title
    Massimo QCD

    Example of search condition using belongs

    >>> set = (1, 2, 3)
    >>> rows = db(db.paper.id.belongs(set)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of search condition using nested select

    >>> nested_select = db()._select(db.authorship.paper_id)
    >>> rows = db(db.paper.id.belongs(nested_select)).select(db.paper.ALL)
    >>> print rows[0].title
    QCD

    Example of expressions

    >>> mynumber = db.define_table('mynumber', Field('x', 'integer'))
    >>> db(mynumber).delete()
    0
    >>> for i in range(10): tmp = mynumber.insert(x=i)
    >>> db(mynumber).select(mynumber.x.sum())[0](mynumber.x.sum())
    45

    >>> db(mynumber.x+2==5).select(mynumber.x + 2)[0](mynumber.x + 2)
    5

    Output in csv

    >>> print str(authored_papers.select(db.author.name, db.paper.title)).strip()
    author.name,paper.title\r
    Massimo,QCD

    Delete all leftover tables

    >>> DAL.distributed_transaction_commit(db)

    >>> db.mynumber.drop()
    >>> db.authorship.drop()
    >>> db.author.drop()
    >>> db.paper.drop()
    """
10591 ################################################################################ 10592 # deprecated since the new DAL; here only for backward compatibility 10593 ################################################################################ 10594 10595 SQLField = Field 10596 SQLTable = Table 10597 SQLXorable = Expression 10598 SQLQuery = Query 10599 SQLSet = Set 10600 SQLRows = Rows 10601 SQLStorage = Row 10602 SQLDB = DAL 10603 GQLDB = DAL 10604 DAL.Field = Field # was necessary in gluon/globals.py session.connect 10605 DAL.Table = Table # was necessary in gluon/globals.py session.connect
################################################################################
# Geodal utils
################################################################################

def geoPoint(x, y):
    """Return the WKT (Well-Known Text) POINT for coordinates (x, y)."""
    coords = (x, y)
    return "POINT (%f %f)" % coords
10613
def geoLine(*line):
    """Return the WKT LINESTRING built from (x, y) coordinate pairs."""
    segments = ["%f %f" % point for point in line]
    return "LINESTRING (%s)" % ','.join(segments)
10616
def geoPolygon(*line):
    """Return the WKT POLYGON (single ring) built from (x, y) coordinate pairs."""
    ring = ','.join("%f %f" % vertex for vertex in line)
    return "POLYGON ((%s))" % ring
10619 10620 ################################################################################ 10621 # run tests 10622 ################################################################################ 10623 10624 if __name__ == '__main__': 10625 import doctest 10626 doctest.testmod() 10627